From c4f5dd9f4d45530ad22f165f5ec88736534e5827 Mon Sep 17 00:00:00 2001
From: Ticheng Lin <51488860+ticheng-aws@users.noreply.github.com>
Date: Wed, 11 Oct 2023 07:31:18 -0700
Subject: [PATCH 001/178] Fix flaky query profile phase tests with concurrent search enabled (#10547)

Signed-off-by: Ticheng Lin
---
 CHANGELOG.md | 2 +-
 .../search/query/QueryProfilePhaseTests.java | 22 +++++--------------
 2 files changed, 6 insertions(+), 18 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 572a8346a0686..2e107d2c04539 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -158,4 +158,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ### Security

 [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD
-[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x
\ No newline at end of file
+[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x

diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java
index 92d27032b62e3..28af8a63cfba8 100644
--- a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java
+++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java
@@ -1066,10 +1066,6 @@ public void testIndexSortScrollOptimization() throws Exception {
     }

     public void testDisableTopScoreCollection() throws Exception {
-        assumeFalse(
-            "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10469",
-            executor != null
-        );
         Directory dir = newDirectory();
         IndexWriterConfig iwc = newIndexWriterConfig(new StandardAnalyzer());
         RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
@@ -1108,10 +1104,10 @@
         assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L));
         if (executor != null) {
             assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L));
-            assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L));
+            assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(0L));
             assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L));
             assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L));
-            assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThan(0L));
+            assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(0L));
             assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L));
         }
         assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
@@ -1241,10 +1237,6 @@ public void testMinScore() throws Exception {
     }

     public void testMaxScore() throws Exception {
-        assumeFalse(
-            "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/9932",
-            executor != null
-        );
         Directory dir = newDirectory();
         final Sort sort = new Sort(new SortField("filter", SortField.Type.STRING));
         IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
@@ -1286,7 +1278,7 @@
         assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L));
         assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(0L));
         assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L));
-        assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L));
+        assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(4L));
         assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(0L));
         assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(1L));
     }
@@ -1364,10 +1356,6 @@ public void testMaxScore() throws Exception {
     }

     public void testCollapseQuerySearchResults() throws Exception {
-        assumeFalse(
-            "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10139",
-            executor != null
-        );
         Directory dir = newDirectory();
         final Sort sort = new Sort(new SortField("user", SortField.Type.INT));
         IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
@@ -1412,7 +1400,7 @@
         assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L));
         assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L));
         assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L));
-        assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(6L));
+        assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(2L));
         assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L));
     }
     assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
@@ -1447,7 +1435,7 @@
         assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L));
         assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L));
         assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L));
-        assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(6L));
+        assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(2L));
         assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L));
     }
     assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
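Under concurrent segment search the profile breakdown is aggregated across slice collectors, and a slice that happens to collect no scored documents legitimately reports a zero minimum, which is why the patch above relaxes the min_score and min_score_count assertions to greaterThanOrEqualTo. A minimal illustrative sketch of that aggregation property (plain Java, not OpenSearch code; the per-slice counts are made-up values):

    import java.util.Arrays;

    public class SliceMinimumSketch {
        public static void main(String[] args) {
            // Hypothetical per-slice score counts: the middle slice scored no documents.
            long[] sliceScoreCounts = { 3L, 0L, 2L };
            long min = Arrays.stream(sliceScoreCounts).min().orElse(0L);
            long max = Arrays.stream(sliceScoreCounts).max().orElse(0L);
            // min == 0 while max > 0, so only min >= 0 can be asserted deterministically.
            System.out.println("min=" + min + " max=" + max);
        }
    }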
From 489de2a49a51e6a9a82fbfeefe29bc587b36206b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 11 Oct 2023 11:38:15 -0400
Subject: [PATCH 002/178] Bump commons-io:commons-io from 2.13.0 to 2.14.0 in /plugins/ingest-attachment (#10294)

* Bump commons-io:commons-io in /plugins/ingest-attachment

Bumps commons-io:commons-io from 2.13.0 to 2.14.0.

---
updated-dependencies:
- dependency-name: commons-io:commons-io
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]

* Updating SHAs

Signed-off-by: dependabot[bot]

* Update changelog

Signed-off-by: dependabot[bot]

---------

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
 CHANGELOG.md | 1 +
 plugins/ingest-attachment/build.gradle | 2 +-
 plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 | 1 -
 plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 | 1 +
 4 files changed, 3 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1
 create mode 100644 plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2e107d2c04539..563f3e43e9707 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -123,6 +123,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297))
 - Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506))
 - Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508))
+- Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294))

 ### Changed
 - Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415))

diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle
index 330a17c02bc7a..8945c09fca28b 100644
--- a/plugins/ingest-attachment/build.gradle
+++ b/plugins/ingest-attachment/build.gradle
@@ -57,7 +57,7 @@ dependencies {
   runtimeOnly "com.google.guava:guava:${versions.guava}"
   // Other dependencies
   api 'org.tukaani:xz:1.9'
-  api 'commons-io:commons-io:2.13.0'
+  api 'commons-io:commons-io:2.14.0'
   api "org.slf4j:slf4j-api:${versions.slf4j}"

   // character set detection

diff --git a/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1
deleted file mode 100644
index c165136eb5822..0000000000000
--- a/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8bb2bc9b4df17e2411533a0708a69f983bf5e83b
\ No newline at end of file

diff --git a/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1
new file mode 100644
index 0000000000000..33c5cfe53e01d
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1
@@ -0,0 +1 @@
+a4c6e1f6c196339473cd2e1b037f0eb97c62755b
\ No newline at end of file

From c55e1b47bffd40defc86686e9c6961d32dd087c2 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Wed, 11 Oct 2023 14:03:49 -0400
Subject: [PATCH 003/178] Add the means to extract the contextual properties from HttpChannel, TcpChannel and TransportChannel without excessive typecasting (#10562)

* Add the means to extract the contextual properties from HttpChannel, TcpChannel and TransportChannel without excessive typecasting

Signed-off-by: Andriy Redko

* Address code review comments

Signed-off-by: Andriy Redko

---------

Signed-off-by: Andriy Redko
---
 CHANGELOG.md | 1 +
 .../http/netty4/Netty4HttpChannel.java | 17 +++++++++++++++++
 .../transport/netty4/Netty4TcpChannel.java | 13 +++++++++++++
 .../java/org/opensearch/http/HttpChannel.java | 14 ++++++++++++++
 .../tracing/channels/TraceableHttpChannel.java | 6 ++++++
 .../channels/TraceableTcpTransportChannel.java | 6 ++++++
 .../transport/TaskTransportChannel.java | 6 ++++++
 .../org/opensearch/transport/TcpChannel.java | 15 +++++++++++++++
 .../transport/TcpTransportChannel.java | 5 +++++
 .../opensearch/transport/TransportChannel.java | 15 +++++++++++++++
 10 files changed, 98 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 563f3e43e9707..5e31085eb3935 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -139,6 +139,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - [Remote Store] Add support to reload repository metadata inplace ([#9569](https://github.com/opensearch-project/OpenSearch/pull/9569))
 - [Metrics Framework] Add Metrics framework. ([#10241](https://github.com/opensearch-project/OpenSearch/pull/10241))
 - Updating the separator for RemoteStoreLockManager since underscore is allowed in base64UUID url charset ([#10379](https://github.com/opensearch-project/OpenSearch/pull/10379))
+- Add the means to extract the contextual properties from HttpChannel, TcpChannel and TransportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562))

 ### Deprecated

diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java
index a83330356e35e..6475a0b744c60 100644
--- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java
+++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java
@@ -40,6 +40,7 @@ import org.opensearch.transport.netty4.Netty4TcpChannel;

 import java.net.InetSocketAddress;
+import java.util.Optional;

 import io.netty.channel.Channel;
 import io.netty.channel.ChannelPipeline;
@@ -98,6 +99,22 @@ public Channel getNettyChannel() {
         return channel;
     }

+    @SuppressWarnings("unchecked")
+    @Override
+    public <T> Optional<T> get(String name, Class<T> clazz) {
+        Object handler = getNettyChannel().pipeline().get(name);
+
+        if (handler == null && inboundPipeline() != null) {
+            handler = inboundPipeline().get(name);
+        }
+
+        if (handler != null && clazz.isInstance(handler) == true) {
+            return Optional.of((T) handler);
+        }
+
+        return Optional.empty();
+    }
+
     @Override
     public String toString() {
         return "Netty4HttpChannel{" + "localAddress=" + getLocalAddress() + ", remoteAddress=" + getRemoteAddress() + '}';

diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java
index 5db1f7c333157..79a5bf9e95121 100644
--- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java
+++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java
@@ -41,6 +41,7 @@ import org.opensearch.transport.TransportException;

 import java.net.InetSocketAddress;
+import java.util.Optional;

 import io.netty.channel.Channel;
 import io.netty.channel.ChannelFuture;
@@ -164,6 +165,18 @@ public void sendMessage(BytesReference reference, ActionListener<Void> listener) {
         }
     }

+    @SuppressWarnings("unchecked")
+    @Override
+    public <T> Optional<T> get(String name, Class<T> clazz) {
+        final Object handler = getNettyChannel().pipeline().get(name);
+
+        if (handler != null && clazz.isInstance(handler) == true) {
+            return Optional.of((T) handler);
+        }
+
+        return Optional.empty();
+    }
+
     public Channel getNettyChannel() {
         return channel;
     }

diff --git a/server/src/main/java/org/opensearch/http/HttpChannel.java b/server/src/main/java/org/opensearch/http/HttpChannel.java
index 6dcdaf9034413..679a5d73c7837 100644
--- a/server/src/main/java/org/opensearch/http/HttpChannel.java
+++ b/server/src/main/java/org/opensearch/http/HttpChannel.java
@@ -36,6 +36,7 @@ import org.opensearch.core.action.ActionListener;

 import java.net.InetSocketAddress;
+import java.util.Optional;

 /**
  * Represents an HTTP comms channel
@@ -72,4 +73,17 @@ default void handleException(Exception ex) {}
      */
     InetSocketAddress getRemoteAddress();

+    /**
+     * Returns the contextual property associated with this specific HTTP channel (the
+     * implementation of how such properties are managed depends on the particular
+     * transport engine).
+     *
+     * @param name the name of the property
+     * @param clazz the expected type of the property
+     *
+     * @return the value of the property
+     */
+    default <T> Optional<T> get(String name, Class<T> clazz) {
+        return Optional.empty();
+    }
 }

diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java
index 03848e8e58207..0a9757310fe8b 100644
--- a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java
+++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java
@@ -18,6 +18,7 @@ import java.net.InetSocketAddress;
 import java.util.Objects;
+import java.util.Optional;

 /**
  * Tracer wrapped {@link HttpChannel}
@@ -92,4 +93,9 @@ public InetSocketAddress getLocalAddress() {
     public InetSocketAddress getRemoteAddress() {
         return delegate.getRemoteAddress();
     }
+
+    @Override
+    public <T> Optional<T> get(String name, Class<T> clazz) {
+        return delegate.get(name, clazz);
+    }
 }

diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java
index 333e06eb037cb..bd60c35c3baac 100644
--- a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java
+++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java
@@ -20,6 +20,7 @@ import org.opensearch.transport.TransportChannel;

 import java.io.IOException;
+import java.util.Optional;

 /**
  * Tracer wrapped {@link TransportChannel}
@@ -109,4 +110,9 @@ public void sendResponse(Exception exception) throws IOException {
     public Version getVersion() {
         return delegate.getVersion();
     }
+
+    @Override
+    public <T> Optional<T> get(String name, Class<T> clazz) {
+        return delegate.get(name, clazz);
+    }
 }

diff --git a/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java b/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java
index 052611317f174..4dab0039ec878 100644
--- a/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java
+++ b/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java
@@ -37,6 +37,7 @@ import org.opensearch.core.transport.TransportResponse;

 import java.io.IOException;
+import java.util.Optional;

 /**
  * Transport channel for tasks
@@ -89,4 +90,9 @@ public Version getVersion() {
     public TransportChannel getChannel() {
         return channel;
     }
+
+    @Override
+    public <T> Optional<T> get(String name, Class<T> clazz) {
+        return getChannel().get(name, clazz);
+    }
 }
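The HttpChannel default method above is what callers program against: a property is looked up by name and expected type, and an empty Optional comes back when the transport engine does not expose it. A minimal usage sketch, assuming a hypothetical handler name "my-handler" and a hypothetical MyHandler type (neither is part of the patch):

    import java.util.Optional;
    import org.opensearch.http.HttpChannel;

    final class ChannelPropertyLookup {
        // Fetches a typed contextual property from the channel, if present and type-compatible.
        static void inspect(HttpChannel channel) {
            Optional<MyHandler> handler = channel.get("my-handler", MyHandler.class);
            handler.ifPresent(h -> System.out.println("Resolved handler: " + h));
        }

        static final class MyHandler {} // illustrative placeholder type
    }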
diff --git a/server/src/main/java/org/opensearch/transport/TcpChannel.java b/server/src/main/java/org/opensearch/transport/TcpChannel.java
index eac137ec30f1a..f98b65d0a4df1 100644
--- a/server/src/main/java/org/opensearch/transport/TcpChannel.java
+++ b/server/src/main/java/org/opensearch/transport/TcpChannel.java
@@ -38,6 +38,7 @@ import org.opensearch.core.common.bytes.BytesReference;

 import java.net.InetSocketAddress;
+import java.util.Optional;

 /**
  * This is a tcp channel representing a single channel connection to another node. It is the base channel
@@ -96,6 +97,20 @@ public interface TcpChannel extends CloseableChannel {
      */
     ChannelStats getChannelStats();

+    /**
+     * Returns the contextual property associated with this specific TCP channel (the
+     * implementation of how such properties are managed depends on the particular
+     * transport engine).
+     *
+     * @param name the name of the property
+     * @param clazz the expected type of the property
+     *
+     * @return the value of the property
+     */
+    default <T> Optional<T> get(String name, Class<T> clazz) {
+        return Optional.empty();
+    }
+
     /**
      * Channel statistics
      *

diff --git a/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java b/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java
index a7bedcf93e129..81de0af07ea7c 100644
--- a/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java
+++ b/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java
@@ -38,6 +38,7 @@ import org.opensearch.search.query.QuerySearchResult;

 import java.io.IOException;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;

@@ -130,4 +131,8 @@ public Version getVersion() {
         return version;
     }

+    @Override
+    public <T> Optional<T> get(String name, Class<T> clazz) {
+        return getChannel().get(name, clazz);
+    }
 }
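Because each wrapper simply forwards to the channel it decorates, a lookup made on the outermost channel reaches the engine-specific implementation (for Netty, the pipeline) with no casts along the way; the TransportChannel hunk below completes that chain. A hedged sketch of the idea, assuming a Netty4-backed channel and a hypothetical pipeline handler name "ssl" (the delegation path follows the patch: TaskTransportChannel -> TcpTransportChannel -> Netty4TcpChannel):

    import java.util.Optional;
    import io.netty.handler.ssl.SslHandler;
    import org.opensearch.transport.TransportChannel;

    final class TransportChannelTlsCheck {
        // Returns true when an SslHandler is registered under the assumed name "ssl".
        static boolean isTls(TransportChannel channel) {
            Optional<SslHandler> ssl = channel.get("ssl", SslHandler.class);
            return ssl.isPresent();
        }
    }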
diff --git a/server/src/main/java/org/opensearch/transport/TransportChannel.java b/server/src/main/java/org/opensearch/transport/TransportChannel.java
index 3c582127f28e8..7423d59103302 100644
--- a/server/src/main/java/org/opensearch/transport/TransportChannel.java
+++ b/server/src/main/java/org/opensearch/transport/TransportChannel.java
@@ -39,6 +39,7 @@ import org.opensearch.core.transport.TransportResponse;

 import java.io.IOException;
+import java.util.Optional;

 /**
  * A transport channel allows to send a response to a request on the channel.
@@ -78,4 +79,18 @@ static void sendErrorResponse(TransportChannel channel, String actionName, Trans
         );
     }
 }

+    /**
+     * Returns the contextual property associated with this specific transport channel (the
+     * implementation of how such properties are managed depends on the particular
+     * transport engine).
+     *
+     * @param name the name of the property
+     * @param clazz the expected type of the property
+     *
+     * @return the value of the property.
+     */
+    default <T> Optional<T> get(String name, Class<T> clazz) {
+        return Optional.empty();
+    }
 }

From a54108c6ef378dbbe432e6248240d4f1afd74047 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Wed, 11 Oct 2023 14:04:18 -0400
Subject: [PATCH 004/178] Bump netty from 4.1.99.Final to 4.1.100.Final (#10564)

Signed-off-by: Andriy Redko
---
 CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- .../licenses/netty-buffer-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-buffer-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.100.Final.jar.sha1 | 1 + .../transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.99.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.100.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.100.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.99.Final.jar.sha1 | 1 - .../repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 | 1 + .../repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.99.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.100.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.99.Final.jar.sha1 | 1 - .../netty-transport-classes-epoll-4.1.100.Final.jar.sha1 | 1 + .../netty-transport-classes-epoll-4.1.99.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.100.Final.jar.sha1 | 1 +
.../netty-transport-native-unix-common-4.1.99.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.99.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.100.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.99.Final.jar.sha1 | 1 - 68 files changed, 35 insertions(+), 34 deletions(-) create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 delete mode 100644 
plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 
5e31085eb3935..6055af47403bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) - Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) +- Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564)) ### Changed - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index e54a5a1089a93..64f9f9a8828ad 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -28,7 +28,7 @@ jakarta_annotation = 1.3.5 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.99.Final +netty = 4.1.100.Final joda = 2.12.2 # client dependencies diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..aaf2e35302d77 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 deleted file mode 100644 index 5b393be40e945..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f02dcb9b15a647a56af210dffdc294a57922fb0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..a77333ea8ae47 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 deleted file mode 100644 index 45ea27d29a183..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9984cbd6e5d55c768f198e975d8aaf7fd42a4602 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..6f26bf4e6a9b5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 deleted file mode 100644 index 6bb7fcd68b272..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7142095066eaebd5f29b88c41af7b383b6a953f6 \ No newline at 
end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..bf5605151406e --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +cbf1a430ea44dbdedbcde16b185cbb95f28d72c7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 deleted file mode 100644 index f9bdefc6dd965..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5a3481c4bb9732a3a94fb63cf916141a1a14669 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..d2ff72db60d1f --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +847f942381145de23f21c836d05b0677474271d3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 deleted file mode 100644 index d53adfa649f5f..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -278f6dfa49d6bd75c40ae1470eb165716f87dce0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..f12a6046e96d0 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +4c0acdb8bb73647ebb3847ac2d503d53d72c02b4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 deleted file mode 100644 index 258f7c957dda0..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -742693761d7ea4c038bccfda96bb38194720b80d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..8e4179ba15942 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +fe62f9ccd41b8660d07639dbbab8ae1edd6f2720 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 deleted file mode 100644 index b8bc0a4370f58..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -080e45397d9d5b134477de3ffd0f94283b908621 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..ab2819da570fd --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +6620fbfb47667a5eb6050e35c7b4c88000bcd77f \ No newline at end of file diff --git 
a/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 deleted file mode 100644 index 247975e0a64c7..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ca2e3ae19a6713b749df154622115f480b6716c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..30d7758302e37 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +648ff5571022dbfa6789122e3872477bbf67fa7b \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 deleted file mode 100644 index 6b7b66ea768e3..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cb0fc6c31c387404212949c57950b5d72ce908b9 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..dfa4a0fbea94c --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +663b1b7bf3ff0f12fde4df20c72d9e94584ebffa \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 deleted file mode 100644 index 6c1112ed49775..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21c76a42a468faafac6c84f8aca775073fc8e345 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..bf5605151406e --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +cbf1a430ea44dbdedbcde16b185cbb95f28d72c7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 deleted file mode 100644 index f9bdefc6dd965..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5a3481c4bb9732a3a94fb63cf916141a1a14669 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..8e9bc8c96aec7 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +a9fbf4d64b08abed542eefd5f7aed4807edca56f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 deleted file mode 100644 index 717703c36e1ab..0000000000000 --- 
a/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -259bf1c5178c3e23bb89a2fab59b6d22846e3fa6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..35d9d82202274 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +af3cf676eed30184215426ecf0f0dde15555ea9c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 deleted file mode 100644 index 3f69ae54c5d4a..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8c8a89ea89b06e120c57bdb3db14b9a47ca30bb3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..0948daa05fff6 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +62dbdf5f25eda75ea8456be1ed72b3fcb0d18774 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 deleted file mode 100644 index adef44a4e7da7..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -804d8b752847923d3bb81f24de604597047c9b2e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..30d7758302e37 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +648ff5571022dbfa6789122e3872477bbf67fa7b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 deleted file mode 100644 index 6b7b66ea768e3..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cb0fc6c31c387404212949c57950b5d72ce908b9 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..a9aa34392903e --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +5ef15a3ce29a792b7ad17438e5f84c617b3f2993 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 deleted file mode 100644 index 0756635018837..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a45aa70bc50d0500da5cdcd595cc838d87ada987 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 
b/plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..aaf2e35302d77 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 deleted file mode 100644 index 5b393be40e945..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f02dcb9b15a647a56af210dffdc294a57922fb0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..a77333ea8ae47 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 deleted file mode 100644 index 45ea27d29a183..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9984cbd6e5d55c768f198e975d8aaf7fd42a4602 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..6f26bf4e6a9b5 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 deleted file mode 100644 index 6bb7fcd68b272..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7142095066eaebd5f29b88c41af7b383b6a953f6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..bf5605151406e --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +cbf1a430ea44dbdedbcde16b185cbb95f28d72c7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 deleted file mode 100644 index f9bdefc6dd965..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5a3481c4bb9732a3a94fb63cf916141a1a14669 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..d2ff72db60d1f --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +847f942381145de23f21c836d05b0677474271d3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 deleted file mode 100644 index 
d53adfa649f5f..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -278f6dfa49d6bd75c40ae1470eb165716f87dce0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..f12a6046e96d0 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +4c0acdb8bb73647ebb3847ac2d503d53d72c02b4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 deleted file mode 100644 index 258f7c957dda0..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -742693761d7ea4c038bccfda96bb38194720b80d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..8e4179ba15942 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +fe62f9ccd41b8660d07639dbbab8ae1edd6f2720 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 deleted file mode 100644 index b8bc0a4370f58..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -080e45397d9d5b134477de3ffd0f94283b908621 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..ab2819da570fd --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +6620fbfb47667a5eb6050e35c7b4c88000bcd77f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 deleted file mode 100644 index 247975e0a64c7..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ca2e3ae19a6713b749df154622115f480b6716c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..5805fdaf411d1 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +78489936ca1d91483e34a31d04a3b0812386eb39 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 deleted file mode 100644 index 75b64ad4197d8..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -961bd5b8d97ea6a07168176462f398089a24b5c8 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 new file 
mode 100644 index 0000000000000..30d7758302e37 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +648ff5571022dbfa6789122e3872477bbf67fa7b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 deleted file mode 100644 index 6b7b66ea768e3..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cb0fc6c31c387404212949c57950b5d72ce908b9 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..aaf2e35302d77 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 deleted file mode 100644 index 5b393be40e945..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f02dcb9b15a647a56af210dffdc294a57922fb0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..a77333ea8ae47 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 deleted file mode 100644 index 45ea27d29a183..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9984cbd6e5d55c768f198e975d8aaf7fd42a4602 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..6f26bf4e6a9b5 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 deleted file mode 100644 index 6bb7fcd68b272..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7142095066eaebd5f29b88c41af7b383b6a953f6 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..d2ff72db60d1f --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +847f942381145de23f21c836d05b0677474271d3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 deleted file mode 100644 index d53adfa649f5f..0000000000000 --- 
a/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -278f6dfa49d6bd75c40ae1470eb165716f87dce0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..f12a6046e96d0 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +4c0acdb8bb73647ebb3847ac2d503d53d72c02b4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 deleted file mode 100644 index 258f7c957dda0..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -742693761d7ea4c038bccfda96bb38194720b80d \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..8e4179ba15942 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +fe62f9ccd41b8660d07639dbbab8ae1edd6f2720 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 deleted file mode 100644 index b8bc0a4370f58..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -080e45397d9d5b134477de3ffd0f94283b908621 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..ab2819da570fd --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +6620fbfb47667a5eb6050e35c7b4c88000bcd77f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1 deleted file mode 100644 index 247975e0a64c7..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ca2e3ae19a6713b749df154622115f480b6716c \ No newline at end of file

From f518e9dfd518e1ad31d62ff028b61ab91222204d Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Wed, 11 Oct 2023 21:17:10 -0400
Subject: [PATCH 005/178] Update OpenSearch 2.12 to use Apache Lucene 9.8.0 (#10578)

Signed-off-by: Andriy Redko
---
 libs/core/src/main/java/org/opensearch/Version.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java
index 3c210e5c7cf96..eef4da719994c 100644
--- a/libs/core/src/main/java/org/opensearch/Version.java
+++ b/libs/core/src/main/java/org/opensearch/Version.java
@@ -96,7 +96,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_2_10_0 = new Version(2100099, org.apache.lucene.util.Version.LUCENE_9_7_0);
     public static final Version V_2_10_1 = new Version(2100199, org.apache.lucene.util.Version.LUCENE_9_7_0);
     public static final Version V_2_11_0 = new Version(2110099, org.apache.lucene.util.Version.LUCENE_9_7_0);
org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_8_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_8_0); public static final Version CURRENT = V_3_0_0; From 90c4297266495de41c3b09728ce82d1a9aec1d64 Mon Sep 17 00:00:00 2001 From: Ashish Date: Thu, 12 Oct 2023 10:26:34 +0530 Subject: [PATCH 006/178] Fix shard failure on flush during upload failures for remote indexes (#10513) Signed-off-by: Ashish Singh --- ...emoteStoreBackpressureAndResiliencyIT.java | 29 +++++++++++++++++++ .../opensearch/index/shard/IndexShard.java | 2 +- .../translog/InternalTranslogManager.java | 6 +++- .../index/translog/TranslogManager.java | 2 +- .../transfer/TranslogTransferManager.java | 4 +-- .../TranslogUploadFailedException.java | 27 +++++++++++++++++ .../index/engine/InternalEngineTests.java | 6 +++- .../index/translog/RemoteFsTranslogTests.java | 3 +- 8 files changed, 72 insertions(+), 7 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java index 2c6db6ae19a9a..98586b60dcc69 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java @@ -11,6 +11,7 @@ import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.action.admin.indices.flush.FlushResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractAsyncTask; @@ -228,4 +229,32 @@ public void testSkipLoadGlobalCheckpointToReplicationTracker() { client().admin().cluster().prepareReroute().setRetryFailed(true).get(); ensureGreen(INDEX_NAME); } + + public void testFlushDuringRemoteUploadFailures() { + Path location = randomRepoPath().toAbsolutePath(); + String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE); + + logger.info("--> Indexing data"); + indexData(randomIntBetween(1, 2), true); + logger.info("--> Indexing succeeded"); + ensureGreen(INDEX_NAME); + + MockRepository translogRepo = (MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName) + .repository(TRANSLOG_REPOSITORY_NAME); + logger.info("--> Failing all remote store interaction"); + translogRepo.setRandomControlIOExceptionRate(1d); + + Exception ex = assertThrows(UncategorizedExecutionException.class, () -> indexSingleDoc()); + assertEquals("Failed execution", ex.getMessage()); + + FlushResponse flushResponse = client().admin().indices().prepareFlush(INDEX_NAME).setForce(true).execute().actionGet(); + assertEquals(1, flushResponse.getFailedShards()); + ensureGreen(INDEX_NAME); + + logger.info("--> Stop failing all remote store interactions"); + translogRepo.setRandomControlIOExceptionRate(0d); + flushResponse = client().admin().indices().prepareFlush(INDEX_NAME).setForce(true).execute().actionGet(); + assertEquals(1, 
flushResponse.getSuccessfulShards()); + assertEquals(0, flushResponse.getFailedShards()); + } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 6ac75c7bfec17..9489c7d7fc1dd 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -1475,7 +1475,7 @@ public void trimTranslog() { /** * Rolls the tranlog generation and cleans unneeded. */ - public void rollTranslogGeneration() { + public void rollTranslogGeneration() throws IOException { final Engine engine = getEngine(); engine.translogManager().rollTranslogGeneration(); } diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java index 4bc9a711894b7..85c52b907d326 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java @@ -18,6 +18,7 @@ import org.opensearch.index.engine.LifecycleAware; import org.opensearch.index.seqno.LocalCheckpointTracker; import org.opensearch.index.translog.listener.TranslogEventListener; +import org.opensearch.index.translog.transfer.TranslogUploadFailedException; import java.io.Closeable; import java.io.IOException; @@ -83,11 +84,14 @@ public InternalTranslogManager( * Rolls the translog generation and cleans unneeded. */ @Override - public void rollTranslogGeneration() throws TranslogException { + public void rollTranslogGeneration() throws TranslogException, IOException { try (ReleasableLock ignored = readLock.acquire()) { engineLifeCycleAware.ensureOpen(); translog.rollGeneration(); translog.trimUnreferencedReaders(); + } catch (TranslogUploadFailedException e) { + // Do not trigger the translogEventListener as it fails the Engine while this is only an issue with remote upload + throw e; } catch (AlreadyClosedException e) { translogEventListener.onFailure("translog roll generation failed", e); throw e; diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java index 78aaa1bc13a00..148fd67fb413e 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java @@ -21,7 +21,7 @@ public interface TranslogManager { /** * Rolls the translog generation and cleans unneeded. */ - void rollTranslogGeneration() throws TranslogException; + void rollTranslogGeneration() throws TranslogException, IOException; /** * Performs recovery from the transaction log up to {@code recoverUpToSeqNo} (inclusive). 
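A minimal, self-contained sketch (hypothetical names; not the upstream classes) of the contract this patch establishes. Translog upload failures surface as a dedicated IOException subtype so that a caller such as the flush path can fail the request while leaving the engine, and therefore the shard, intact:

import java.io.IOException;

public class UploadFailureContractSketch {

    // Stand-in for the new exception type added below.
    static class TranslogUploadFailedException extends IOException {
        TranslogUploadFailedException(String message) {
            super(message);
        }
    }

    interface TranslogOps {
        void rollGeneration() throws IOException;
    }

    static void rollTranslogGeneration(TranslogOps translog, Runnable failEngineListener) throws IOException {
        try {
            translog.rollGeneration();
        } catch (TranslogUploadFailedException e) {
            // Remote-upload-only failure: propagate so the caller (e.g. a flush) fails,
            // but skip the listener that would fail the whole engine.
            throw e;
        } catch (IOException e) {
            // Any other I/O problem still fails the engine, as before.
            failEngineListener.run();
            throw e;
        }
    }
}

The integration test above (testFlushDuringRemoteUploadFailures) exercises exactly this behavior: the forced flush reports a failed shard while the index stays green, and a later flush succeeds once uploads recover.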
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index d988b8a6254ff..ece6f6d5a534f 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -176,7 +176,7 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans remoteTranslogTransferTracker.addUploadTimeInMillis((System.nanoTime() - metadataUploadStartTime) / 1_000_000L); remoteTranslogTransferTracker.addUploadBytesFailed(metadataBytesToUpload); // outer catch handles capturing stats on upload failure - throw exception; + throw new TranslogUploadFailedException("Failed to upload " + tlogMetadata.getName(), exception); } remoteTranslogTransferTracker.addUploadTimeInMillis((System.nanoTime() - metadataUploadStartTime) / 1_000_000L); @@ -185,7 +185,7 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans translogTransferListener.onUploadComplete(transferSnapshot); return true; } else { - Exception ex = new IOException("Failed to upload " + exceptionList.size() + " files during transfer"); + Exception ex = new TranslogUploadFailedException("Failed to upload " + exceptionList.size() + " files during transfer"); exceptionList.forEach(ex::addSuppressed); throw ex; } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java new file mode 100644 index 0000000000000..4a9b10ec5a52e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog.transfer; + +import java.io.IOException; + +/** + * Exception is thrown if there are any exceptions while uploading translog to remote store. 
+ * @opensearch.internal + */ +public class TranslogUploadFailedException extends IOException { + + public TranslogUploadFailedException(String message) { + super(message); + } + + public TranslogUploadFailedException(String message, Throwable cause) { + super(message, cause); + } + +} diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index b8bb73bb89a82..305c3a3acbf75 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -7256,7 +7256,11 @@ public void testMaxSeqNoInCommitUserData() throws Exception { engine.ensureOpen(); while (running.get() && assertAndGetInternalTranslogManager(engine.translogManager()).getTranslog().currentFileGeneration() < 500) { - engine.translogManager().rollTranslogGeneration(); // make adding operations to translog slower + try { + engine.translogManager().rollTranslogGeneration(); // make adding operations to translog slower + } catch (IOException e) { + fail("io exception not expected"); + } } }); rollTranslog.start(); diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index b2310010620f7..42e0df2dc90c1 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -47,6 +47,7 @@ import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.index.translog.transfer.TranslogTransferManager; import org.opensearch.index.translog.transfer.TranslogTransferMetadata; +import org.opensearch.index.translog.transfer.TranslogUploadFailedException; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -1113,7 +1114,7 @@ public void testSyncUpAlwaysFailure() throws IOException { try { translog.sync(); fail("io exception expected"); - } catch (IOException e) { + } catch (TranslogUploadFailedException e) { assertTrue("at least one operation pending", translog.syncNeeded()); } } From 9c06228f2e39b3a7b46e9eb26aa2368dcbf4116a Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Thu, 12 Oct 2023 16:09:11 +0530 Subject: [PATCH 007/178] Fix system_repository setting is restored after cluster metadata recovery from local disk (#10556) Signed-off-by: bansvaru --- .../RemoteStoreRepositoryRegistrationIT.java | 19 +++++++++++++++++++ .../get/GetRepositoriesResponse.java | 7 +++++-- .../metadata/RepositoriesMetadata.java | 3 ++- 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java index 002a149f0c286..ef2dcf3217df6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java @@ -8,21 +8,31 @@ package org.opensearch.remotestore; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesAction; +import 
org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.transport.MockTransportService; +import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.Set; import java.util.stream.Collectors; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; + @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreRepositoryRegistrationIT extends RemoteStoreBaseIntegTestCase { @@ -162,4 +172,13 @@ public void testNodeRestartPostNonRestrictedSettingsUpdate() throws Exception { ensureStableCluster(4); } + + public void testSystemRepositorySettingIsHiddenForGetRepositoriesRequest() throws IOException { + GetRepositoriesRequest request = new GetRepositoriesRequest(new String[] { REPOSITORY_NAME }); + GetRepositoriesResponse repositoriesResponse = client().execute(GetRepositoriesAction.INSTANCE, request).actionGet(); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.JSON)); + XContentBuilder xContentBuilder = repositoriesResponse.toXContent(builder, ToXContent.EMPTY_PARAMS); + repositoriesResponse = GetRepositoriesResponse.fromXContent(createParser(xContentBuilder)); + assertEquals(false, SYSTEM_REPOSITORY_SETTING.get(repositoriesResponse.repositories().get(0).settings())); + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java index 098a0e60142e7..f8c8df25be532 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java @@ -42,8 +42,8 @@ import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; -import java.util.Collections; import java.util.List; +import java.util.Map; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; @@ -83,7 +83,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); repositories.toXContent( builder, - new DelegatingMapParams(Collections.singletonMap(RepositoriesMetadata.HIDE_GENERATIONS_PARAM, "true"), params) + new DelegatingMapParams( + Map.of(RepositoriesMetadata.HIDE_GENERATIONS_PARAM, "true", RepositoriesMetadata.HIDE_SYSTEM_REPOSITORY_SETTING, "true"), + params + ) ); builder.endObject(); return builder; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java index a5ef337c3b62a..e3689d046193c 100644 --- 
a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java @@ -70,6 +70,7 @@ public class RepositoriesMetadata extends AbstractNamedDiffable implemen * in {@link org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse}. */ public static final String HIDE_GENERATIONS_PARAM = "hide_generations"; + public static final String HIDE_SYSTEM_REPOSITORY_SETTING = "hide_system_repository_setting"; private final List repositories; @@ -291,7 +292,7 @@ public static void toXContent(RepositoryMetadata repository, XContentBuilder bui repository.cryptoMetadata().toXContent(repository.cryptoMetadata(), builder, params); } Settings settings = repository.settings(); - if (SYSTEM_REPOSITORY_SETTING.get(settings)) { + if (SYSTEM_REPOSITORY_SETTING.get(settings) && params.paramAsBoolean(HIDE_SYSTEM_REPOSITORY_SETTING, false)) { settings = repository.settings().filter(s -> !s.equals(SYSTEM_REPOSITORY_SETTING.getKey())); } builder.startObject("settings"); From 6c022612769e6bc7ed18b9ec0888d6cd0dd415cc Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Thu, 12 Oct 2023 16:11:56 +0530 Subject: [PATCH 008/178] Validate checksum of each segment file post download from remote store (#10119) --------- Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale --- .../remotestore/RemoteStoreRestoreIT.java | 33 ++++++++++++- .../store/RemoteSegmentStoreDirectory.java | 4 ++ .../org/opensearch/index/store/Store.java | 47 ++++++++++++++++++- 3 files changed, 81 insertions(+), 3 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java index 7626e3dba6424..212f797180077 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java @@ -10,8 +10,11 @@ import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; @@ -19,10 +22,12 @@ import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; +import org.opensearch.test.CorruptionUtils; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; import java.util.HashMap; import java.util.Locale; @@ -30,13 +35,14 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static 
org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.greaterThan; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreRestoreIT extends BaseRemoteStoreRestoreIT { /** @@ -461,5 +467,30 @@ public void testRateLimitedRemoteDownloads() throws Exception { } } + public void testRestoreCorruptSegmentShouldFail() throws IOException, ExecutionException, InterruptedException { + prepareCluster(1, 3, INDEX_NAME, 0, 1); + indexData(randomIntBetween(3, 4), true, INDEX_NAME); + + GetIndexResponse getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest()).get(); + String indexUUID = getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID); + + logger.info("--> Corrupting segment files in remote segment store"); + Path path = segmentRepoPath.resolve(indexUUID).resolve("0").resolve("segments").resolve("data"); + try (Stream dataPath = Files.list(path)) { + CorruptionUtils.corruptFile(random(), dataPath.toArray(Path[]::new)); + } + + logger.info("--> Stop primary"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); + + logger.info("--> Close and restore the index"); + client().admin() + .cluster() + .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME).waitForCompletion(true), PlainActionFuture.newFuture()); + + logger.info("--> Check for index status, should be red due to corruption"); + ensureRed(INDEX_NAME); + } + // TODO: Restore flow - index aliases } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index be1f2341236ab..6b43fed3d8930 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -290,6 +290,10 @@ public void setWrittenByMajor(int writtenByMajor) { ); } } + + public int getWrittenByMajor() { + return writtenByMajor; + } } /** diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index b822742de6e97..d0cd2635ba672 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -105,6 +105,7 @@ import java.io.UncheckedIOException; import java.nio.file.NoSuchFileException; import java.nio.file.Path; +import java.text.ParseException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -120,6 +121,7 @@ import java.util.zip.CRC32; import java.util.zip.Checksum; +import static java.lang.Character.MAX_RADIX; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; @@ -975,7 +977,11 @@ public void copyFrom(Directory from, String src, String dest, IOContext context) boolean success = false; long startTime = System.currentTimeMillis(); try { - super.copyFrom(from, src, dest, context); + if (from instanceof RemoteSegmentStoreDirectory) { + copyFileAndValidateChecksum(from, src, dest, context, fileSize); + } else { + super.copyFrom(from, src, dest, context); + } success = true; afterDownload(fileSize, startTime); } 
finally { @@ -985,6 +991,43 @@ public void copyFrom(Directory from, String src, String dest, IOContext context) } } + private void copyFileAndValidateChecksum(Directory from, String src, String dest, IOContext context, long fileSize) + throws IOException { + RemoteSegmentStoreDirectory.UploadedSegmentMetadata metadata = ((RemoteSegmentStoreDirectory) from) + .getSegmentsUploadedToRemoteStore() + .get(dest); + boolean success = false; + try (IndexInput is = from.openInput(src, context); IndexOutput os = createOutput(dest, context)) { + // Here, we don't need the exact version as LuceneVerifyingIndexOutput does not verify version + // It is just used to emit logs when the entire metadata object is provided as parameter. Also, + // we can't provide null version as StoreFileMetadata has non-null check on writtenBy field. + Version luceneMajorVersion = Version.parse(metadata.getWrittenByMajor() + ".0.0"); + long checksum = Long.parseLong(metadata.getChecksum()); + StoreFileMetadata storeFileMetadata = new StoreFileMetadata( + dest, + fileSize, + Long.toString(checksum, MAX_RADIX), + luceneMajorVersion + ); + VerifyingIndexOutput verifyingIndexOutput = new LuceneVerifyingIndexOutput(storeFileMetadata, os); + verifyingIndexOutput.copyBytes(is, is.length()); + verifyingIndexOutput.verify(); + success = true; + } catch (ParseException e) { + throw new IOException("Exception while reading version info for segment file from remote store: " + dest, e); + } finally { + if (success == false) { + // If the exception is thrown after file is created, we clean up the file. + // We ignore the exception as the deletion is best-effort basis and can fail if file does not exist. + try { + deleteFile("Quietly deleting", dest); + } catch (Exception e) { + // Ignore + } + } + } + } + /** * Updates the amount of bytes attempted for download */ @@ -1476,7 +1519,7 @@ public static boolean isAutogenerated(String name) { * Produces a string representation of the given digest value. 
*/ public static String digestToString(long digest) { - return Long.toString(digest, Character.MAX_RADIX); + return Long.toString(digest, MAX_RADIX); } /** From 382eb301b21bf5c6eda613432b9ddccda1c414b9 Mon Sep 17 00:00:00 2001 From: Louis Chu Date: Thu, 12 Oct 2023 17:31:25 -0400 Subject: [PATCH 009/178] Add release notes for 2.11 (#10594) Signed-off-by: Louis Chu --- CHANGELOG.md | 65 +--------------- .../opensearch.release-notes-2.11.0.md | 75 +++++++++++++++++++ 2 files changed, 76 insertions(+), 64 deletions(-) create mode 100644 release-notes/opensearch.release-notes-2.11.0.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 6055af47403bc..a9e5bb3982708 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,7 +33,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.code.gson:gson` from 2.10 to 2.10.1 - Bump `com.maxmind.geoip2:geoip2` from 4.0.0 to 4.0.1 - Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.11 to 0.16.12 -- Bump `org.apache.commons:commons-compress` from 1.22 to 1.23.0 - Bump `org.apache.commons:commons-configuration2` from 2.8.0 to 2.9.0 - Bump `com.netflix.nebula:nebula-publishing-plugin` from 19.2.0 to 20.3.0 - Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291)) @@ -46,7 +45,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) - Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) -- Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564)) ### Changed - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) @@ -86,79 +84,18 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added -- Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386)) -- Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681)) -- Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) -- [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666)) -- Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) -- Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) -- Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) -- Add unreferenced file cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204)) -- [Remote Store] Add support to restrict creation & deletion if system repository and mutation of immutable settings of system repository ([#9839](https://github.com/opensearch-project/OpenSearch/pull/9839)) -- Improve compressed request handling ([#10261](https://github.com/opensearch-project/OpenSearch/pull/10261)) ### 
Dependencies -- Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) -- Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968)) -- Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950)) -- Bump `org.apache.commons:commons-compress` from 1.23.0 to 1.24.0 ([#9973, #9972](https://github.com/opensearch-project/OpenSearch/pull/9973, https://github.com/opensearch-project/OpenSearch/pull/9972)) -- Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971)) -- Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) -- Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) -- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) -- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) -- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752)) -- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126)) -- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.5 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206), [#10299](https://github.com/opensearch-project/OpenSearch/pull/10299)) -- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) -- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) -- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) -- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295)) -- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) -- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) -- Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301)) -- Bump `org.apache.avro:avro` from 1.11.2 to 1.11.3 ([#10210](https://github.com/opensearch-project/OpenSearch/pull/10210)) -- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) -- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) -- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) -- Bump 
`commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) ### Changed -- Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) -- Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916)) -- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) -- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) -- Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) -- [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) -- Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246)) -- Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200)) -- Add instrumentation in Inbound Handler. ([#100143](https://github.com/opensearch-project/OpenSearch/pull/10143)) -- Enable remote segment upload backpressure by default ([#10356](https://github.com/opensearch-project/OpenSearch/pull/10356)) -- [Remote Store] Add support to reload repository metadata inplace ([#9569](https://github.com/opensearch-project/OpenSearch/pull/9569)) -- [Metrics Framework] Add Metrics framework. ([#10241](https://github.com/opensearch-project/OpenSearch/pull/10241)) -- Updating the separator for RemoteStoreLockManager since underscore is allowed in base64UUID url charset ([#10379](https://github.com/opensearch-project/OpenSearch/pull/10379)) -- Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) ### Deprecated ### Removed -- Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447)) ### Fixed -- Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725)) -- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) -- Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) -- Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) -- Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) -- Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) -- Fix registration and initialization of multiple extensions ([10256](https://github.com/opensearch-project/OpenSearch/pull/10256)) -- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) -- Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure 
([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x diff --git a/release-notes/opensearch.release-notes-2.11.0.md b/release-notes/opensearch.release-notes-2.11.0.md new file mode 100644 index 0000000000000..d7e9182f2a656 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.11.0.md @@ -0,0 +1,75 @@ +## 2023-10-12 Version 2.11.0 Release Notes + +## [2.11] + +### Added +- Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386)) +- Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681)) +- Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) +- [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666)) +- Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) +- Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) +- Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) +- Add unreferenced file cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204)) +- [Remote Store] Add support to restrict creation & deletion if system repository and mutation of immutable settings of system repository ([#9839](https://github.com/opensearch-project/OpenSearch/pull/9839)) +- Improve compressed request handling ([#10261](https://github.com/opensearch-project/OpenSearch/pull/10261)) + +### Dependencies +- Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) +- Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968)) +- Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950)) +- Bump `org.apache.commons:commons-compress` from 1.23.0 to 1.24.0 ([#9973, #9972](https://github.com/opensearch-project/OpenSearch/pull/9973, https://github.com/opensearch-project/OpenSearch/pull/9972)) +- Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971)) +- Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) +- Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) +- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) +- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) +- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752)) +- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126)) +- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.5 
([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206), [#10299](https://github.com/opensearch-project/OpenSearch/pull/10299)) +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) +- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295)) +- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) +- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) +- Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301)) +- Bump `org.apache.avro:avro` from 1.11.2 to 1.11.3 ([#10210](https://github.com/opensearch-project/OpenSearch/pull/10210)) +- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) +- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) +- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) +- Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) +- Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564)) + +### Changed +- Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) +- Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916)) +- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) +- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) +- Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) +- [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) +- Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246)) +- Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200)) +- Add instrumentation in Inbound Handler. 
([#100143](https://github.com/opensearch-project/OpenSearch/pull/10143)) +- Enable remote segment upload backpressure by default ([#10356](https://github.com/opensearch-project/OpenSearch/pull/10356)) +- [Remote Store] Add support to reload repository metadata inplace ([#9569](https://github.com/opensearch-project/OpenSearch/pull/9569)) +- [Metrics Framework] Add Metrics framework. ([#10241](https://github.com/opensearch-project/OpenSearch/pull/10241)) +- Updating the separator for RemoteStoreLockManager since underscore is allowed in base64UUID url charset ([#10379](https://github.com/opensearch-project/OpenSearch/pull/10379)) +- Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) + +### Removed +- Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447)) + +### Fixed +- Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725)) +- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) +- Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) +- Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) +- Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) +- Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) +- Fix registration and initialization of multiple extensions ([10256](https://github.com/opensearch-project/OpenSearch/pull/10256)) +- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) +- Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) From 1447d75e5e9ea042dd4bb645d71f25aea1fb42a8 Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Thu, 12 Oct 2023 15:30:02 -0700 Subject: [PATCH 010/178] Point deprecated settings to the new location. (#10522) I was looking through some cluster settings, stumbled across the search back-pressure settings, saw they were deprecated, and the Javadoc alluded to some mysterious new settings. I'm adding links to the class with the new settings to help future developers.
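For context on what those settings mean, the deprecated cancellation knobs parameterize a token bucket: the rate setting adds tokens per millisecond (the ratio variant instead adds tokens per successful task completion), and the burst setting caps how many tokens may accumulate. A hypothetical sketch of that model (illustrative names, not the upstream TokenBucket implementation):

final class CancellationTokenBucketSketch {
    private final double burst;  // SETTING_CANCELLATION_BURST: bucket capacity
    private final double rate;   // SETTING_CANCELLATION_RATE: tokens per millisecond
    private double tokens;
    private long lastRefillMillis;

    CancellationTokenBucketSketch(double burst, double rate, long nowMillis) {
        this.burst = burst;
        this.rate = rate;
        this.tokens = burst;
        this.lastRefillMillis = nowMillis;
    }

    // One attempted task cancellation consumes one token; refill is proportional to elapsed time.
    synchronized boolean tryCancel(long nowMillis) {
        tokens = Math.min(burst, tokens + (nowMillis - lastRefillMillis) * rate);
        lastRefillMillis = nowMillis;
        if (tokens >= 1.0) {
            tokens -= 1.0;
            return true;
        }
        return false;
    }
}

Each cancellation spends one token, so sustained cancellations are limited by the refill rate while short spikes up to the burst size are still allowed.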
Signed-off-by: Michael Froh --- .../backpressure/settings/SearchBackpressureSettings.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java index 4c28d96d8289e..79494eb0d3c24 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java +++ b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java @@ -55,7 +55,7 @@ private static class Defaults { * Defines the percentage of tasks to cancel relative to the number of successful task completions. * In other words, it is the number of tokens added to the bucket on each successful task completion. *
<p>
- * The setting below is deprecated. + * The setting below is deprecated. The new setting is in {@link SearchShardTaskSettings}. * To keep backwards compatibility, the old usage is remained, and it's also used as the fallback for the new usage. */ public static final Setting SETTING_CANCELLATION_RATIO = Setting.doubleSetting( @@ -72,7 +72,7 @@ private static class Defaults { * Defines the number of tasks to cancel per unit time (in millis). * In other words, it is the number of tokens added to the bucket each millisecond. *
<p>
- * The setting below is deprecated. + * The setting below is deprecated. The new setting is in {@link SearchShardTaskSettings}. * To keep backwards compatibility, the old usage is remained, and it's also used as the fallback for the new usage. */ public static final Setting SETTING_CANCELLATION_RATE = Setting.doubleSetting( @@ -87,7 +87,7 @@ private static class Defaults { /** * Defines the maximum number of tasks that can be cancelled before being rate-limited. *
<p>
- * The setting below is deprecated. + * The setting below is deprecated. The new setting is in {@link SearchShardTaskSettings}. * To keep backwards compatibility, the old usage is remained, and it's also used as the fallback for the new usage. */ public static final Setting SETTING_CANCELLATION_BURST = Setting.doubleSetting( From 25f2b769f85c24258e2c49cc0d6b09d6b2500b99 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 13 Oct 2023 17:03:05 -0400 Subject: [PATCH 011/178] Bump OpenTelemetry from 1.30.1 to 1.31.0 (#10617) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 3 ++- plugins/telemetry-otel/build.gradle | 2 +- .../telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 | 1 - .../telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-context-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-context-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 | 1 + .../opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 | 1 - .../opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 | 1 + .../opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 | 1 - .../opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 | 1 + .../opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1 | 1 - .../opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 | 1 + .../telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 | 1 - .../telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 | 1 + .../licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 | 1 - .../org/opensearch/telemetry/tracing/OTelResourceProvider.java | 2 +- 32 files changed, 19 insertions(+), 17 deletions(-) delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 delete mode 100644 
plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index a9e5bb3982708..51f477fc38da8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) - Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) +- Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) ### Changed - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 64f9f9a8828ad..a5171aa582a86 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -68,4 +68,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.30.1 +opentelemetry = 1.31.0 +opentelemetrysemconv = 1.21.0-alpha diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 04fff20947b4f..f5c367cb7643b 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -28,7 +28,7 @@ dependencies { api "io.opentelemetry:opentelemetry-sdk-trace:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-sdk-metrics:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-exporter-logging:${versions.opentelemetry}" - api "io.opentelemetry:opentelemetry-semconv:${versions.opentelemetry}-alpha" + api 
"io.opentelemetry.semconv:opentelemetry-semconv:${versions.opentelemetrysemconv}" api "io.opentelemetry:opentelemetry-sdk-logs:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-exporter-otlp:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-exporter-common:${versions.opentelemetry}" diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 deleted file mode 100644 index b0ce00e191830..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a32dfbd7f01de6711fd0e970f8d4b4c0405056d6 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..eae141a8d1a23 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 @@ -0,0 +1 @@ +bb24a44d73484c681c236aed84fe6c28d17f30e2 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.30.1.jar.sha1 deleted file mode 100644 index 84cb60a2f7acb..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58f665ff01ce6b964cdf0b8cb5cd1c196dfe94ce \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..6e42973adc581 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 @@ -0,0 +1 @@ +b8004737f7a970124e36ac71fde8eb88423e8cee \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 deleted file mode 100644 index eccb15f7b7c8e..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f299d336dba1039478497f37b273dfa764c6faef \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..b119468e7f88b --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 @@ -0,0 +1 @@ +b7b4baf5f9af72d5eb8a231dfb114ae31c57150d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 deleted file mode 100644 index 40537a399ab14..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58f1a09e89955e6145babf8bcdf80c95174eb817 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..8f653922d6418 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 @@ -0,0 +1 @@ +260e5363dad83a0ae65c16ad6a3dd2914e0db201 \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 deleted file mode 100644 index e88b7514ee54d..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -15692246539571c41180aff2b55abe527b939a7b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..103da4720de96 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 @@ -0,0 +1 @@ +b6454464425dfd81519070caeca3824558a2f1ae \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 deleted file mode 100644 index 86937743208c6..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -947cf43a6411c4a323e14594431040a476ad43e8 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..3db07532ceea9 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 @@ -0,0 +1 @@ +d8c22b6851bbc3dbf5d2387b9bde158ed5416ba4 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 deleted file mode 100644 index 068926277253c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f3a14515500e4df260ce7b10a668237a95ac791 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..10d9b7cdfe3e3 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 @@ -0,0 +1 @@ +dd209381d58cfe81a989e29c9ca26d97c8dabd7a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1 deleted file mode 100644 index bde43937e82e4..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bfcea9bd71f97dd4e8a4f92c15ba5659fb07ff05 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..162890965a6eb --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 @@ -0,0 +1 @@ +6c9f5c063309d92b6dd28bff0667f54b63afd36f \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 deleted 
file mode 100644 index d425ed61cc4cd..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d15a9ea26e8e6ea93287a9f4ee02d91e5a74392 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..d6ce31a31cc6f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 @@ -0,0 +1 @@ +2b2093be08a09ac536292bf6cecf8129cc7fb191 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 deleted file mode 100644 index 6b32d98b0f7c7..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e437ba87004bb63069d04fb06beae65b98dd13a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..8a6a9705d836d --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 @@ -0,0 +1 @@ +f492528288236e097e12fc1c45963dd82c70d33c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 deleted file mode 100644 index 13ef6de11e82d..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5985d0950746ad12b49cc42c063f26ddfbcaaacb \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..37d79f5c573f7 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 @@ -0,0 +1 @@ +a63a203d3dc6f8875f8c26b9e3b522dc9a3f6280 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 deleted file mode 100644 index fc5aad9c9011e..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b12825541c5dae52a0fb35045c1b36df3ca8f632 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..80179e4808f50 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 @@ -0,0 +1 @@ +47cc23762fae728d68e4fda1dfb71986ae0b8b3e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 deleted file mode 100644 index ac522b765da05..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c5531fbc44178a7bcfeb7021ae80e70a7c43458 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 new file mode 100644 index 
0000000000000..fd917a58ba77c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 @@ -0,0 +1 @@ +a3941197cfb8ae9eb9e482073480c0c3918b746c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..77b12c99464f6 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 @@ -0,0 +1 @@ +207660e74d1e155272e9559fd4d27854b92fc6ac \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 deleted file mode 100644 index 089a2484dd1d5..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e8f7a97a4896a81846553275b9d61885be7ef50 \ No newline at end of file diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java index a6a1f12aab8a9..14a19f122c17b 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java @@ -30,7 +30,7 @@ import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; import io.opentelemetry.sdk.trace.export.SpanExporter; import io.opentelemetry.sdk.trace.samplers.Sampler; -import io.opentelemetry.semconv.resource.attributes.ResourceAttributes; +import io.opentelemetry.semconv.ResourceAttributes; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING; From daf1350888f878868748172f576a0cdb3dc64b33 Mon Sep 17 00:00:00 2001 From: David Z <38449481+dzane17@users.noreply.github.com> Date: Fri, 13 Oct 2023 14:12:46 -0700 Subject: [PATCH 012/178] Per request phase latency (#10351) Signed-off-by: David Zane --- CHANGELOG.md | 1 + .../search/AbstractSearchAsyncAction.java | 2 + .../action/search/MultiSearchRequest.java | 5 + .../action/search/SearchRequest.java | 31 +++- .../action/search/SearchResponse.java | 132 +++++++++++++++++- .../action/search/SearchResponseMerger.java | 1 + .../action/search/TransportSearchAction.java | 129 +++++++++++++---- .../common/settings/ClusterSettings.java | 1 + .../rest/action/search/RestSearchAction.java | 6 + .../AbstractSearchAsyncActionTests.java | 15 +- .../action/search/SearchRequestTests.java | 1 + .../action/search/SearchResponseTests.java | 47 ++++++- .../search/SearchTimeProviderTests.java | 54 +++++++ .../search/RandomSearchRequestGenerator.java | 3 + 14 files changed, 394 insertions(+), 34 deletions(-) create mode 100644 server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 51f477fc38da8..a53d31c7861ff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -85,6 +85,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) ### Dependencies diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java 
b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 1c0a1280ad550..14f57218ae1dc 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -215,6 +215,7 @@ public final void start() { 0, 0, buildTookInMillis(), + timeProvider.getPhaseTook(), ShardSearchFailure.EMPTY_ARRAY, clusters, null @@ -662,6 +663,7 @@ protected final SearchResponse buildSearchResponse( successfulOps.get(), skippedOps.get(), buildTookInMillis(), + timeProvider.getPhaseTook(), failures, clusters, searchContextId diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java index da8f8f144eaf2..00e0345062d1c 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java @@ -277,6 +277,8 @@ public static void readMultiLineFormat( } else if ("cancel_after_time_interval".equals(entry.getKey()) || "cancelAfterTimeInterval".equals(entry.getKey())) { searchRequest.setCancelAfterTimeInterval(nodeTimeValue(value, null)); + } else if ("phase_took".equals(entry.getKey())) { + searchRequest.setPhaseTook(nodeBooleanValue(value)); } else { throw new IllegalArgumentException("key [" + entry.getKey() + "] is not supported in the metadata section"); } @@ -374,6 +376,9 @@ public static void writeSearchRequestParams(SearchRequest request, XContentBuild if (request.getCancelAfterTimeInterval() != null) { xContentBuilder.field("cancel_after_time_interval", request.getCancelAfterTimeInterval().getStringRep()); } + if (request.isPhaseTook() != null) { + xContentBuilder.field("phase_took", request.isPhaseTook()); + } xContentBuilder.endObject(); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index 21cf0ed97b9da..9e50213eab5f9 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -117,6 +117,8 @@ public class SearchRequest extends ActionRequest implements IndicesRequest.Repla private String pipeline; + private Boolean phaseTook = null; + public SearchRequest() { this.localClusterAlias = null; this.absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; @@ -209,6 +211,7 @@ private SearchRequest( this.absoluteStartMillis = absoluteStartMillis; this.finalReduce = finalReduce; this.cancelAfterTimeInterval = searchRequest.cancelAfterTimeInterval; + this.phaseTook = searchRequest.phaseTook; } /** @@ -253,6 +256,9 @@ public SearchRequest(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_2_7_0)) { pipeline = in.readOptionalString(); } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + phaseTook = in.readOptionalBoolean(); + } } @Override @@ -284,6 +290,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_7_0)) { out.writeOptionalString(pipeline); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalBoolean(phaseTook); + } } @Override @@ -615,6 +624,20 @@ public void setPreFilterShardSize(int preFilterShardSize) { this.preFilterShardSize = preFilterShardSize; } + /** + * Returns value of user-provided phase_took query parameter for this search request. 
+ */ + public Boolean isPhaseTook() { + return phaseTook; + } + + /** + * Sets value of phase_took query param if provided by user. Defaults to null. + */ + public void setPhaseTook(Boolean phaseTook) { + this.phaseTook = phaseTook; + } + /** * Returns a threshold that enforces a pre-filter roundtrip to pre-filter search shards based on query rewriting if the number of shards * the search request expands to exceeds the threshold, or null if the threshold is unspecified. @@ -719,7 +742,8 @@ public boolean equals(Object o) { && absoluteStartMillis == that.absoluteStartMillis && ccsMinimizeRoundtrips == that.ccsMinimizeRoundtrips && Objects.equals(cancelAfterTimeInterval, that.cancelAfterTimeInterval) - && Objects.equals(pipeline, that.pipeline); + && Objects.equals(pipeline, that.pipeline) + && Objects.equals(phaseTook, that.phaseTook); } @Override @@ -740,7 +764,8 @@ public int hashCode() { localClusterAlias, absoluteStartMillis, ccsMinimizeRoundtrips, - cancelAfterTimeInterval + cancelAfterTimeInterval, + phaseTook ); } @@ -783,6 +808,8 @@ public String toString() { + cancelAfterTimeInterval + ", pipeline=" + pipeline + + ", phaseTook=" + + phaseTook + "}"; } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponse.java b/server/src/main/java/org/opensearch/action/search/SearchResponse.java index a546311a1f668..91f0dc0737637 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.search; import org.apache.lucene.search.TotalHits; +import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.StatusToXContentObject; @@ -63,7 +64,9 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.function.Supplier; @@ -94,6 +97,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb private final ShardSearchFailure[] shardFailures; private final Clusters clusters; private final long tookInMillis; + private final PhaseTook phaseTook; public SearchResponse(StreamInput in) throws IOException { super(in); @@ -112,6 +116,11 @@ public SearchResponse(StreamInput in) throws IOException { clusters = new Clusters(in); scrollId = in.readOptionalString(); tookInMillis = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + phaseTook = in.readOptionalWriteable(PhaseTook::new); + } else { + phaseTook = null; + } skippedShards = in.readVInt(); pointInTimeId = in.readOptionalString(); } @@ -126,7 +135,32 @@ public SearchResponse( ShardSearchFailure[] shardFailures, Clusters clusters ) { - this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, shardFailures, clusters, null); + this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, null, shardFailures, clusters, null); + } + + public SearchResponse( + SearchResponseSections internalResponse, + String scrollId, + int totalShards, + int successfulShards, + int skippedShards, + long tookInMillis, + ShardSearchFailure[] shardFailures, + Clusters clusters, + String pointInTimeId + ) { + this( + internalResponse, + scrollId, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + null, + shardFailures, + clusters, + pointInTimeId + ); } 
public SearchResponse( @@ -136,6 +170,7 @@ public SearchResponse( int successfulShards, int skippedShards, long tookInMillis, + PhaseTook phaseTook, ShardSearchFailure[] shardFailures, Clusters clusters, String pointInTimeId @@ -148,6 +183,7 @@ public SearchResponse( this.successfulShards = successfulShards; this.skippedShards = skippedShards; this.tookInMillis = tookInMillis; + this.phaseTook = phaseTook; this.shardFailures = shardFailures; assert skippedShards <= totalShards : "skipped: " + skippedShards + " total: " + totalShards; assert scrollId == null || pointInTimeId == null : "SearchResponse can't have both scrollId [" @@ -210,6 +246,13 @@ public TimeValue getTook() { return new TimeValue(tookInMillis); } + /** + * How long the request took in each search phase. + */ + public PhaseTook getPhaseTook() { + return phaseTook; + } + /** * The total number of shards the search was executed on. */ @@ -298,6 +341,9 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t builder.field(POINT_IN_TIME_ID.getPreferredName(), pointInTimeId); } builder.field(TOOK.getPreferredName(), tookInMillis); + if (phaseTook != null) { + phaseTook.toXContent(builder, params); + } builder.field(TIMED_OUT.getPreferredName(), isTimedOut()); if (isTerminatedEarly() != null) { builder.field(TERMINATED_EARLY.getPreferredName(), isTerminatedEarly()); @@ -337,6 +383,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE Boolean terminatedEarly = null; int numReducePhases = 1; long tookInMillis = -1; + PhaseTook phaseTook = null; int successfulShards = -1; int totalShards = -1; int skippedShards = 0; // 0 for BWC @@ -401,6 +448,24 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE parser.skipChildren(); } } + } else if (PhaseTook.PHASE_TOOK.match(currentFieldName, parser.getDeprecationHandler())) { + Map phaseTookMap = new HashMap<>(); + + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + try { + SearchPhaseName.valueOf(currentFieldName.toUpperCase(Locale.ROOT)); + phaseTookMap.put(currentFieldName, parser.longValue()); + } catch (final IllegalArgumentException ex) { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + phaseTook = new PhaseTook(phaseTookMap); } else if (Clusters._CLUSTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { int successful = -1; int total = -1; @@ -472,6 +537,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE successfulShards, skippedShards, tookInMillis, + phaseTook, failures.toArray(ShardSearchFailure.EMPTY_ARRAY), clusters, searchContextId @@ -491,6 +557,9 @@ public void writeTo(StreamOutput out) throws IOException { clusters.writeTo(out); out.writeOptionalString(scrollId); out.writeVLong(tookInMillis); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalWriteable(phaseTook); + } out.writeVInt(skippedShards); out.writeOptionalString(pointInTimeId); } @@ -604,6 +673,67 @@ public String toString() { } } + /** + * Holds info about the time taken by each search phase, keyed by phase name, + * as reported in the "phase_took" section of the search response.
+ * + * @opensearch.internal + */ + public static class PhaseTook implements ToXContentFragment, Writeable { + static final ParseField PHASE_TOOK = new ParseField("phase_took"); + private final Map phaseTookMap; + + public PhaseTook(Map phaseTookMap) { + this.phaseTookMap = phaseTookMap; + } + + private PhaseTook(StreamInput in) throws IOException { + this(in.readMap(StreamInput::readString, StreamInput::readLong)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(phaseTookMap, StreamOutput::writeString, StreamOutput::writeLong); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(PHASE_TOOK.getPreferredName()); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + if (phaseTookMap.containsKey(searchPhaseName.getName())) { + builder.field(searchPhaseName.getName(), phaseTookMap.get(searchPhaseName.getName())); + } else { + builder.field(searchPhaseName.getName(), 0); + } + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + PhaseTook phaseTook = (PhaseTook) o; + + if (phaseTook.phaseTookMap.equals(phaseTookMap)) { + return true; + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash(phaseTookMap); + } + } + static SearchResponse empty(Supplier tookInMillisSupplier, Clusters clusters) { SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN); InternalSearchResponse internalSearchResponse = new InternalSearchResponse( diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java index f90e98106f93f..054bd578cc56c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java @@ -236,6 +236,7 @@ SearchResponse getMergedResponse(SearchResponse.Clusters clusters) { successfulShards, skippedShards, tookInMillis, + searchTimeProvider.getPhaseTook(), shardFailures, clusters, null diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index cff1005beff27..284f71bd9da62 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -98,6 +98,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -154,6 +155,14 @@ public class TransportSearchAction extends HandledTransportAction SEARCH_PHASE_TOOK_ENABLED = Setting.boolSetting( + SEARCH_PHASE_TOOK_ENABLED_KEY, + false, + Property.Dynamic, + Property.NodeScope + ); + private final NodeClient client; private final ThreadPool threadPool; private final ClusterService clusterService; @@ -252,6 +261,8 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust } /** + * Listener to track request-level tookTime and phase tookTimes from the coordinator. + * * Search operations need two clocks. One clock is to fulfill real clock needs (e.g., resolving * "now" to an index name). 
Another clock is needed for measuring how long a search operation * took. These two uses are at odds with each other. There are many issues with using a real @@ -261,11 +272,12 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust * * @opensearch.internal */ - static final class SearchTimeProvider { + static final class SearchTimeProvider implements SearchRequestOperationsListener { private final long absoluteStartMillis; private final long relativeStartNanos; private final LongSupplier relativeCurrentNanosProvider; + private boolean phaseTook = false; /** * Instantiates a new search time provider. The absolute start time is the real clock time @@ -291,6 +303,47 @@ long getAbsoluteStartMillis() { long buildTookInMillis() { return TimeUnit.NANOSECONDS.toMillis(relativeCurrentNanosProvider.getAsLong() - relativeStartNanos); } + + public void setPhaseTook(boolean phaseTook) { + this.phaseTook = phaseTook; + } + + public boolean isPhaseTook() { + return phaseTook; + } + + SearchResponse.PhaseTook getPhaseTook() { + if (phaseTook) { + Map phaseTookMap = new HashMap<>(); + // Convert Map to Map for SearchResponse() + for (SearchPhaseName searchPhaseName : phaseStatsMap.keySet()) { + phaseTookMap.put(searchPhaseName.getName(), phaseStatsMap.get(searchPhaseName)); + } + return new SearchResponse.PhaseTook(phaseTookMap); + } else { + return null; + } + } + + Map phaseStatsMap = new EnumMap<>(SearchPhaseName.class); + + @Override + public void onPhaseStart(SearchPhaseContext context) {} + + @Override + public void onPhaseEnd(SearchPhaseContext context) { + phaseStatsMap.put( + context.getCurrentPhase().getSearchPhaseName(), + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - context.getCurrentPhase().getStartTimeInNanos()) + ); + } + + @Override + public void onPhaseFailure(SearchPhaseContext context) {} + + public Long getPhaseTookTime(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName); + } } @Override @@ -332,13 +385,6 @@ public void executeRequest( SinglePhaseSearchAction phaseSearchAction, ActionListener listener ) { - final List searchListenersList = createSearchListenerList(); - final SearchRequestOperationsListener searchRequestOperationsListener; - if (!CollectionUtils.isEmpty(searchListenersList)) { - searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); - } else { - searchRequestOperationsListener = null; - } executeRequest(task, searchRequest, new SearchAsyncActionProvider() { @Override public AbstractSearchAsyncAction asyncSearchAction( @@ -355,7 +401,8 @@ public AbstractSearchAsyncAction asyncSearchAction( ActionListener listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ) { return new AbstractSearchAsyncAction( actionName, @@ -419,6 +466,16 @@ private void executeRequest( relativeStartNanos, System::nanoTime ); + + final List searchListenersList = createSearchListenerList(originalSearchRequest, timeProvider); + + final SearchRequestOperationsListener searchRequestOperationsListener; + if (!CollectionUtils.isEmpty(searchListenersList)) { + searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); + } else { + searchRequestOperationsListener = null; + } + PipelinedRequest searchRequest; ActionListener listener; try { @@ -462,7 +519,8 @@ private void executeRequest( clusterState, listener, 
searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ); } else { if (shouldMinimizeRoundtrips(searchRequest)) { @@ -483,7 +541,8 @@ private void executeRequest( clusterState, l, searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ) ); } else { @@ -533,7 +592,8 @@ private void executeRequest( listener, new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters.get()), searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ); }, listener::onFailure) ); @@ -622,6 +682,7 @@ public void onResponse(SearchResponse searchResponse) { searchResponse.getSuccessfulShards(), searchResponse.getSkippedShards(), timeProvider.buildTookInMillis(), + timeProvider.getPhaseTook(), searchResponse.getShardFailures(), new SearchResponse.Clusters(1, 1, 0), searchResponse.pointInTimeId() @@ -811,7 +872,8 @@ private void executeLocalSearch( ClusterState clusterState, ActionListener listener, SearchContextId searchContext, - SearchAsyncActionProvider searchAsyncActionProvider + SearchAsyncActionProvider searchAsyncActionProvider, + SearchRequestOperationsListener searchRequestOperationsListener ) { executeSearch( (SearchTask) task, @@ -825,7 +887,8 @@ private void executeLocalSearch( listener, SearchResponse.Clusters.EMPTY, searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ); } @@ -943,7 +1006,8 @@ private void executeSearch( ActionListener listener, SearchResponse.Clusters clusters, @Nullable SearchContextId searchContext, - SearchAsyncActionProvider searchAsyncActionProvider + SearchAsyncActionProvider searchAsyncActionProvider, + SearchRequestOperationsListener searchRequestOperationsListener ) { clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name @@ -1044,7 +1108,8 @@ private void executeSearch( listener, preFilterSearchShards, threadPool, - clusters + clusters, + searchRequestOperationsListener ).start(); } @@ -1127,15 +1192,30 @@ AbstractSearchAsyncAction asyncSearchAction( ActionListener listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ); } - private List createSearchListenerList() { + private List createSearchListenerList(SearchRequest searchRequest, SearchTimeProvider timeProvider) { final List searchListenersList = new ArrayList<>(); + if (isRequestStatsEnabled) { searchListenersList.add(searchRequestStats); } + + // phase_took is enabled with request param and/or cluster setting + Boolean phaseTookRequestParam = searchRequest.isPhaseTook(); + if (phaseTookRequestParam == null) { // check cluster setting only when request param is undefined + if (clusterService.getClusterSettings().get(TransportSearchAction.SEARCH_PHASE_TOOK_ENABLED)) { + timeProvider.setPhaseTook(true); + searchListenersList.add(timeProvider); + } + } else if (phaseTookRequestParam == true) { + timeProvider.setPhaseTook(true); + searchListenersList.add(timeProvider); + } + return searchListenersList; } @@ -1153,15 +1233,9 @@ private AbstractSearchAsyncAction searchAsyncAction ActionListener listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + 
SearchRequestOperationsListener searchRequestOperationsListener ) { - final List searchListenersList = createSearchListenerList(); - final SearchRequestOperationsListener searchRequestOperationsListener; - if (!CollectionUtils.isEmpty(searchListenersList)) { - searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); - } else { - searchRequestOperationsListener = null; - } if (preFilter) { return new CanMatchPreFilterSearchPhase( logger, @@ -1192,7 +1266,8 @@ private AbstractSearchAsyncAction searchAsyncAction listener, false, threadPool, - clusters + clusters, + searchRequestOperationsListener ); return new SearchPhase(action.getName()) { @Override diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 4cd3490cffb4c..ad2b89aa3948d 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -375,6 +375,7 @@ public void apply(Settings value, Settings current, Settings previous) { TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, TransportSearchAction.SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING, TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED, + TransportSearchAction.SEARCH_PHASE_TOOK_ENABLED, RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE, SniffConnectionStrategy.REMOTE_CONNECTIONS_PER_CLUSTER, RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java index ebfd082d974fd..080366e536da1 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java @@ -180,6 +180,12 @@ public static void parseSearchRequest( searchRequest.allowPartialSearchResults(request.paramAsBoolean("allow_partial_search_results", null)); } + if (request.hasParam("phase_took")) { + // only set if we have the parameter passed to override the cluster-level default + // else phaseTook = null + searchRequest.setPhaseTook(request.paramAsBoolean("phase_took", true)); + } + // do not allow 'query_and_fetch' or 'dfs_query_and_fetch' search types // from the REST layer. these modes are an internal optimization and should // not be specified explicitly by the user. 
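Taken together, the changes above give callers two ways to opt in to per-phase timings: the request-level phase_took flag parsed in RestSearchAction, or the SEARCH_PHASE_TOOK_ENABLED cluster setting registered in ClusterSettings. A minimal sketch of the request-level path follows; the index name, client, and logger are illustrative assumptions, not part of this patch:

    // Request-level opt-in: a non-null flag overrides the cluster-level default;
    // leaving it null falls back to the SEARCH_PHASE_TOOK_ENABLED setting.
    SearchRequest searchRequest = new SearchRequest("my-index"); // hypothetical index
    searchRequest.setPhaseTook(true);
    client.search(searchRequest, ActionListener.wrap(
        response -> logger.info("phase timings: {}", response.getPhaseTook()), // new PhaseTook section
        e -> logger.error("search failed", e)
    ));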
diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index f628bb3201452..edac50813e191 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -688,7 +688,11 @@ private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAct ); AtomicReference exception = new AtomicReference<>(); ActionListener listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); - + TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( + 0, + System.nanoTime(), + System::nanoTime + ); return new SearchDfsQueryThenFetchAsyncAction( logger, null, @@ -702,7 +706,7 @@ private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAct searchRequest, listener, shardsIter, - null, + timeProvider, null, task, SearchResponse.Clusters.EMPTY, @@ -734,6 +738,11 @@ private SearchQueryThenFetchAsyncAction createSearchQueryThenFetchAsyncAction( ); AtomicReference exception = new AtomicReference<>(); ActionListener listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); + TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( + 0, + System.nanoTime(), + System::nanoTime + ); return new SearchQueryThenFetchAsyncAction( logger, null, @@ -747,7 +756,7 @@ private SearchQueryThenFetchAsyncAction createSearchQueryThenFetchAsyncAction( searchRequest, listener, shardsIter, - null, + timeProvider, null, task, SearchResponse.Clusters.EMPTY, diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java index 25d8c5551880f..cdd0ea863ce37 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java @@ -244,6 +244,7 @@ private SearchRequest mutate(SearchRequest searchRequest) { ); mutators.add(() -> mutation.source(randomValueOtherThan(searchRequest.source(), this::createSearchSourceBuilder))); mutators.add(() -> mutation.setCcsMinimizeRoundtrips(searchRequest.isCcsMinimizeRoundtrips() == false)); + mutators.add(() -> mutation.setPhaseTook(searchRequest.isPhaseTook() == false)); mutators.add( () -> mutation.setCancelAfterTimeInterval( searchRequest.getCancelAfterTimeInterval() != null diff --git a/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java b/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java index 097e922147698..c9e59ab4ea04d 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java @@ -74,7 +74,9 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.UUID; import static java.util.Collections.singletonMap; @@ -152,6 +154,11 @@ public SearchResponse createTestItem( Boolean terminatedEarly = randomBoolean() ? 
null : randomBoolean(); int numReducePhases = randomIntBetween(1, 10); long tookInMillis = randomNonNegativeLong(); + Map phaseTookMap = new HashMap<>(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + phaseTookMap.put(searchPhaseName.getName(), randomNonNegativeLong()); + } + SearchResponse.PhaseTook phaseTook = new SearchResponse.PhaseTook(phaseTookMap); int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, totalShards); @@ -182,6 +189,7 @@ public SearchResponse createTestItem( successfulShards, skippedShards, tookInMillis, + phaseTook, shardSearchFailures, randomBoolean() ? randomClusters() : SearchResponse.Clusters.EMPTY, null @@ -353,6 +361,14 @@ public void testToXContent() { assertEquals(1, searchExtBuilders.size()); } { + Map phaseTookMap = new HashMap<>(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + phaseTookMap.put(searchPhaseName.getName(), 0L); + } + phaseTookMap.put(SearchPhaseName.QUERY.getName(), 50L); + phaseTookMap.put(SearchPhaseName.FETCH.getName(), 25L); + phaseTookMap.put(SearchPhaseName.EXPAND.getName(), 30L); + SearchResponse.PhaseTook phaseTook = new SearchResponse.PhaseTook(phaseTookMap); SearchResponse response = new SearchResponse( new InternalSearchResponse( new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), @@ -368,13 +384,24 @@ public void testToXContent() { 0, 0, 0, + phaseTook, ShardSearchFailure.EMPTY_ARRAY, - new SearchResponse.Clusters(5, 3, 2) + new SearchResponse.Clusters(5, 3, 2), + null ); StringBuilder expectedString = new StringBuilder(); expectedString.append("{"); { expectedString.append("\"took\":0,"); + expectedString.append("\"phase_took\":"); + { + expectedString.append("{\"dfs_pre_query\":0,"); + expectedString.append("\"query\":50,"); + expectedString.append("\"fetch\":25,"); + expectedString.append("\"dfs_query\":0,"); + expectedString.append("\"expand\":30,"); + expectedString.append("\"can_match\":0},"); + } expectedString.append("\"timed_out\":false,"); expectedString.append("\"_shards\":"); { @@ -477,6 +504,24 @@ public void testToXContentEmptyClusters() throws IOException { assertEquals(0, builder.toString().length()); } + public void testSearchResponsePhaseTookEquals() throws IOException { + SearchResponse.PhaseTook phaseTookA = new SearchResponse.PhaseTook(Map.of("foo", 0L, "bar", 1L)); + SearchResponse.PhaseTook phaseTookB = new SearchResponse.PhaseTook(Map.of("foo", 1L, "bar", 1L)); + SearchResponse.PhaseTook phaseTookC = new SearchResponse.PhaseTook(Map.of("foo", 0L)); + SearchResponse.PhaseTook phaseTookD = new SearchResponse.PhaseTook(Map.of()); + + assertNotEquals(phaseTookA, phaseTookB); + assertNotEquals(phaseTookB, phaseTookA); + assertNotEquals(phaseTookA, phaseTookC); + assertNotEquals(phaseTookC, phaseTookA); + assertNotEquals(phaseTookA, phaseTookD); + assertNotEquals(phaseTookD, phaseTookA); + assertEquals(phaseTookA, phaseTookA); + assertEquals(phaseTookB, phaseTookB); + assertEquals(phaseTookC, phaseTookC); + assertEquals(phaseTookD, phaseTookD); + } + static class DummySearchExtBuilder extends SearchExtBuilder { static ParseField DUMMY_FIELD = new ParseField("dummy"); diff --git a/server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java b/server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java new file mode 100644 index 0000000000000..f0f1a43e6c21e --- /dev/null +++ 
b/server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SearchTimeProviderTests extends OpenSearchTestCase { + + public void testSearchTimeProviderPhaseFailure() { + TransportSearchAction.SearchTimeProvider testTimeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + testTimeProvider.onPhaseStart(ctx); + assertNull(testTimeProvider.getPhaseTookTime(searchPhaseName)); + testTimeProvider.onPhaseFailure(ctx); + assertNull(testTimeProvider.getPhaseTookTime(searchPhaseName)); + } + } + + public void testSearchTimeProviderPhaseEnd() { + TransportSearchAction.SearchTimeProvider testTimeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); + + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + long tookTimeInMillis = randomIntBetween(1, 100); + testTimeProvider.onPhaseStart(ctx); + long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis); + when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime); + assertNull(testTimeProvider.getPhaseTookTime(searchPhaseName)); + testTimeProvider.onPhaseEnd(ctx); + assertThat(testTimeProvider.getPhaseTookTime(searchPhaseName), greaterThanOrEqualTo(tookTimeInMillis)); + } + } +} diff --git a/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java index b942136e1f1e2..74de1e6d96d93 100644 --- a/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java @@ -131,6 +131,9 @@ public static SearchRequest randomSearchRequest(Supplier ra if (randomBoolean()) { searchRequest.setCancelAfterTimeInterval(TimeValue.parseTimeValue(randomTimeValue(), null, "cancel_after_time_interval")); } + if (randomBoolean()) { + searchRequest.setPhaseTook(randomBoolean()); + } return searchRequest; } From 53be8f377a9de7b16da99def03b5e87f39cce430 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Fri, 13 Oct 2023 14:58:26 -0700 Subject: [PATCH 013/178] Fix broken test testCommitOnCloseThrowsException_decRefStore (#10621) This test incorrectly hardcoded which segment to delete to force corruption. If a merge occurred randomly before segments were copied, the expected segment did not exist and the test failed. The test now looks for any .si file and deletes it.
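In essence, instead of deleting the hardcoded "_0.si", the hunk below resolves whichever segment-info file currently exists. A sketch of the same lookup:

    // Pick any segment-info file; robust to a random merge having replaced the
    // expected segment (e.g. _0.si -> _1.si) before the segments were copied.
    final Optional<String> toDelete = Set.of(nrtEngineStore.directory().listAll())
        .stream()
        .filter(f -> f.endsWith(".si"))
        .findAny();
    assertTrue(toDelete.isPresent());
    nrtEngineStore.directory().deleteFile(toDelete.get());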
Signed-off-by: Marc Handalian --- .../opensearch/index/engine/NRTReplicationEngineTests.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java index ee25d3789fb13..09484cd1b5840 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -35,6 +35,7 @@ import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicLong; @@ -591,7 +592,9 @@ public void testCommitOnCloseThrowsException_decRefStore() throws Exception { indexOperations(nrtEngine, operations); // wipe the nrt directory initially so we can sync with primary. cleanAndCopySegmentsFromPrimary(nrtEngine); - nrtEngineStore.directory().deleteFile("_0.si"); + final Optional toDelete = Set.of(nrtEngineStore.directory().listAll()).stream().filter(f -> f.endsWith(".si")).findAny(); + assertTrue(toDelete.isPresent()); + nrtEngineStore.directory().deleteFile(toDelete.get()); assertEquals(2, nrtEngineStore.refCount()); nrtEngine.close(); assertEquals(1, nrtEngineStore.refCount()); From 6c1bd487da8ca1794f29302d7a6c6a713f9c6a01 Mon Sep 17 00:00:00 2001 From: Bhumika Saini Date: Sat, 14 Oct 2023 19:46:09 +0530 Subject: [PATCH 014/178] [Remote Store] Add Remote Store backpressure rejection stats to _nodes/stats (#10524) Signed-off-by: Bhumika Saini --- CHANGELOG.md | 1 + .../index/remote/RemoteSegmentStats.java | 51 ++++++++++++++++--- .../remote/RemoteSegmentTransferTracker.java | 3 +- .../cluster/node/stats/NodeStatsTests.java | 1 + .../index/shard/IndexShardTests.java | 4 ++ 5 files changed, 52 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a53d31c7861ff..011e404c3d302 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -90,6 +90,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies ### Changed +- [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java index c7863536adf20..5992923a4157b 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java @@ -8,6 +8,7 @@ package org.opensearch.index.remote; +import org.opensearch.Version; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; @@ -75,6 +76,10 @@ public class RemoteSegmentStats implements Writeable, ToXContentFragment { * Total time spent in downloading segments from remote store */ private long totalDownloadTime; + /** + * Total rejections due to remote store upload backpressure + */ + private long totalRejections; public RemoteSegmentStats() {} @@ -90,6 +95,10 @@ public RemoteSegmentStats(StreamInput in) throws IOException { totalRefreshBytesLag = in.readLong(); totalUploadTime = in.readLong(); totalDownloadTime = in.readLong(); + // TODO: change to V_2_12_0 on main after 
backport to 2.x + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + totalRejections = in.readVLong(); + } } /** @@ -115,6 +124,7 @@ public RemoteSegmentStats(RemoteSegmentTransferTracker.Stats trackerStats) { this.totalRefreshBytesLag = trackerStats.bytesLag; this.totalUploadTime = trackerStats.totalUploadTimeInMs; this.totalDownloadTime = trackerStats.directoryFileTransferTrackerStats.totalTransferTimeInMs; + this.totalRejections = trackerStats.rejectionCount; } // Getter and setters. All are visible for testing @@ -207,6 +217,14 @@ public void addTotalDownloadTime(long totalDownloadTime) { this.totalDownloadTime += totalDownloadTime; } + public long getTotalRejections() { + return totalRejections; + } + + public void addTotalRejections(long totalRejections) { + this.totalRejections += totalRejections; + } + /** * Adds existing stats. Used for stats roll-ups at index or node level * @@ -225,6 +243,7 @@ public void add(RemoteSegmentStats existingStats) { this.totalRefreshBytesLag += existingStats.getTotalRefreshBytesLag(); this.totalUploadTime += existingStats.getTotalUploadTime(); this.totalDownloadTime += existingStats.getTotalDownloadTime(); + this.totalRejections += existingStats.totalRejections; } } @@ -241,18 +260,26 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(totalRefreshBytesLag); out.writeLong(totalUploadTime); out.writeLong(totalDownloadTime); + // TODO: change to V_2_12_0 on main after backport to 2.x + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeVLong(totalRejections); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.REMOTE_STORE); + builder.startObject(Fields.UPLOAD); buildUploadStats(builder); - builder.endObject(); + builder.endObject(); // UPLOAD + builder.startObject(Fields.DOWNLOAD); buildDownloadStats(builder); - builder.endObject(); - builder.endObject(); + builder.endObject(); // DOWNLOAD + + builder.endObject(); // REMOTE_STORE + return builder; } @@ -261,13 +288,19 @@ private void buildUploadStats(XContentBuilder builder) throws IOException { builder.humanReadableField(Fields.STARTED_BYTES, Fields.STARTED, new ByteSizeValue(uploadBytesStarted)); builder.humanReadableField(Fields.SUCCEEDED_BYTES, Fields.SUCCEEDED, new ByteSizeValue(uploadBytesSucceeded)); builder.humanReadableField(Fields.FAILED_BYTES, Fields.FAILED, new ByteSizeValue(uploadBytesFailed)); - builder.endObject(); + builder.endObject(); // TOTAL_UPLOAD_SIZE + builder.startObject(Fields.REFRESH_SIZE_LAG); builder.humanReadableField(Fields.TOTAL_BYTES, Fields.TOTAL, new ByteSizeValue(totalRefreshBytesLag)); builder.humanReadableField(Fields.MAX_BYTES, Fields.MAX, new ByteSizeValue(maxRefreshBytesLag)); - builder.endObject(); + builder.endObject(); // REFRESH_SIZE_LAG + builder.humanReadableField(Fields.MAX_REFRESH_TIME_LAG_IN_MILLIS, Fields.MAX_REFRESH_TIME_LAG, new TimeValue(maxRefreshTimeLag)); builder.humanReadableField(Fields.TOTAL_TIME_SPENT_IN_MILLIS, Fields.TOTAL_TIME_SPENT, new TimeValue(totalUploadTime)); + + builder.startObject(Fields.PRESSURE); + builder.field(Fields.TOTAL_REJECTIONS, totalRejections); + builder.endObject(); // PRESSURE } private void buildDownloadStats(XContentBuilder builder) throws IOException { @@ -300,6 +333,8 @@ static final class Fields { static final String MAX_BYTES = "max_bytes"; static final String TOTAL_TIME_SPENT = "total_time_spent"; static final String TOTAL_TIME_SPENT_IN_MILLIS = "total_time_spent_in_millis"; + static final 
String PRESSURE = "pressure"; + static final String TOTAL_REJECTIONS = "total_rejections"; } @Override @@ -318,7 +353,8 @@ public boolean equals(Object o) { && maxRefreshBytesLag == that.maxRefreshBytesLag && totalRefreshBytesLag == that.totalRefreshBytesLag && totalUploadTime == that.totalUploadTime - && totalDownloadTime == that.totalDownloadTime; + && totalDownloadTime == that.totalDownloadTime + && totalRejections == that.totalRejections; } @Override @@ -334,7 +370,8 @@ public int hashCode() { maxRefreshBytesLag, totalRefreshBytesLag, totalUploadTime, - totalDownloadTime + totalDownloadTime, + totalRejections ); } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java index 05081180bb179..2a703f17aa953 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java @@ -267,7 +267,8 @@ public long getRejectionCount() { return rejectionCount.get(); } - void incrementRejectionCount() { + /** public only for testing **/ + public void incrementRejectionCount() { rejectionCount.incrementAndGet(); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index e3f16463a5328..e0b35c69cc3c0 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -815,6 +815,7 @@ private static NodeIndicesStats getNodeIndicesStats(boolean remoteStoreStats) { remoteSegmentStats.setMaxRefreshTimeLag(2L); remoteSegmentStats.addTotalUploadTime(20L); remoteSegmentStats.addTotalDownloadTime(20L); + remoteSegmentStats.addTotalRejections(5L); RemoteTranslogStats remoteTranslogStats = indicesStats.getTranslog().getRemoteTranslogStats(); RemoteTranslogStats otherRemoteTranslogStats = new RemoteTranslogStats(getRandomRemoteTranslogTransferTrackerStats()); diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index b2eb41828a4df..9ef9bec01cb38 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -4910,6 +4910,8 @@ private void populateSampleRemoteSegmentStats(RemoteSegmentTransferTracker track tracker.addUploadBytesStarted(30L); tracker.addUploadBytesSucceeded(10L); tracker.addUploadBytesFailed(10L); + tracker.incrementRejectionCount(); + tracker.incrementRejectionCount(); } private void populateSampleRemoteTranslogStats(RemoteTranslogTransferTracker tracker) { @@ -4943,5 +4945,7 @@ private static void assertRemoteSegmentStats( assertEquals(remoteSegmentTransferTracker.getUploadBytesStarted(), remoteSegmentStats.getUploadBytesStarted()); assertEquals(remoteSegmentTransferTracker.getUploadBytesSucceeded(), remoteSegmentStats.getUploadBytesSucceeded()); assertEquals(remoteSegmentTransferTracker.getUploadBytesFailed(), remoteSegmentStats.getUploadBytesFailed()); + assertTrue(remoteSegmentStats.getTotalRejections() > 0); + assertEquals(remoteSegmentTransferTracker.getRejectionCount(), remoteSegmentStats.getTotalRejections()); } } From 01a6ffdf10bfc49341016b244b56bdb1f79e0d11 Mon Sep 17 00:00:00 2001 From: 
Louis Chu Date: Sat, 14 Oct 2023 15:05:37 -0400 Subject: [PATCH 015/178] Update release note and changelog (#10609) Signed-off-by: Louis Chu --- CHANGELOG.md | 15 ++++++++++++-- .../opensearch.release-notes-2.11.0.md | 20 ++++++------------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 011e404c3d302..644fb05900ceb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,9 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) - Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) -- Implement Visitor Design pattern in QueryBuilder to enable the capability to traverse through the complex QueryBuilder tree. ([#10110](https://github.com/opensearch-project/OpenSearch/pull/10110)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) -- Configurable merge policy for index with an option to choose from LogByteSize and Tiered merge policy ([#9992](https://github.com/opensearch-project/OpenSearch/pull/9992)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 @@ -88,8 +86,18 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) ### Dependencies +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) +- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295)) +- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) +- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) +- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) +- Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) ### Changed +- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) +- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) +- Add the means to extract the contextual properties from HttpChannel, TcpChannel and TransportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) - [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) ### Deprecated @@ -97,6 +105,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Removed ### Fixed +- Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API 
([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) +- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) +- Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) ### Security diff --git a/release-notes/opensearch.release-notes-2.11.0.md b/release-notes/opensearch.release-notes-2.11.0.md index d7e9182f2a656..7ebf1b433c7c6 100644 --- a/release-notes/opensearch.release-notes-2.11.0.md +++ b/release-notes/opensearch.release-notes-2.11.0.md @@ -8,13 +8,16 @@ - Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) - [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666)) - Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) +- Implement Visitor Design pattern in QueryBuilder to enable the capability to traverse through the complex QueryBuilder tree. ([#10110](https://github.com/opensearch-project/OpenSearch/pull/10110)) - Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) - Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) - Add unreferenced file cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204)) +- Configurable merge policy for index with an option to choose from LogByteSize and Tiered merge policy ([#9992](https://github.com/opensearch-project/OpenSearch/pull/9992)) - [Remote Store] Add support to restrict creation & deletion if system repository and mutation of immutable settings of system repository ([#9839](https://github.com/opensearch-project/OpenSearch/pull/9839)) - Improve compressed request handling ([#10261](https://github.com/opensearch-project/OpenSearch/pull/10261)) ### Dependencies +- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) - Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) - Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968)) - Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950)) @@ -25,30 +28,22 @@ - Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) - Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) - Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752)) +- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) +- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) - Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126)) - Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.5 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206), 
[#10299](https://github.com/opensearch-project/OpenSearch/pull/10299)) -- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) -- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) - Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` - Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` - Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) -- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295)) - Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) - Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) - Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301)) - Bump `org.apache.avro:avro` from 1.11.2 to 1.11.3 ([#10210](https://github.com/opensearch-project/OpenSearch/pull/10210)) -- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) -- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) -- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) -- Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) - Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564)) ### Changed - Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) - Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916)) -- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) -- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) - Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) - [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) - Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246)) @@ -58,7 +53,6 @@ - [Remote Store] Add support to reload repository metadata inplace ([#9569](https://github.com/opensearch-project/OpenSearch/pull/9569)) - [Metrics Framework] Add Metrics framework. 
([#10241](https://github.com/opensearch-project/OpenSearch/pull/10241)) - Updating the separator for RemoteStoreLockManager since underscore is allowed in base64UUID url charset ([#10379](https://github.com/opensearch-project/OpenSearch/pull/10379)) -- Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) ### Removed - Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447)) @@ -68,8 +62,6 @@ - Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) - Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) - Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) -- Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) -- Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) - Fix registration and initialization of multiple extensions ([10256](https://github.com/opensearch-project/OpenSearch/pull/10256)) +- Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) - Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) -- Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) From ead7ade0b78628defdd28b09ddaf1d276966c619 Mon Sep 17 00:00:00 2001 From: Bhumika Saini Date: Mon, 16 Oct 2023 12:00:54 +0530 Subject: [PATCH 016/178] Update version check (#10630) Signed-off-by: Bhumika Saini --- .../org/opensearch/index/remote/RemoteSegmentStats.java | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java index 5992923a4157b..4228ec60c4524 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java @@ -95,8 +95,7 @@ public RemoteSegmentStats(StreamInput in) throws IOException { totalRefreshBytesLag = in.readLong(); totalUploadTime = in.readLong(); totalDownloadTime = in.readLong(); - // TODO: change to V_2_12_0 on main after backport to 2.x - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { totalRejections = in.readVLong(); } } @@ -260,8 +259,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(totalRefreshBytesLag); out.writeLong(totalUploadTime); out.writeLong(totalDownloadTime); - // TODO: change to V_2_12_0 on main after backport to 2.x - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeVLong(totalRejections); } } From 368d35abb398b7e379db281358529e4cb689ae05 Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Mon, 16 Oct 2023 17:01:57 +0530 Subject: [PATCH 
017/178] Add resource usage trackers and resource usage collector service (#9890) --------- Signed-off-by: Bharathwaj G --- CHANGELOG.md | 1 + .../admin/cluster/node/stats/NodeStats.java | 23 ++- .../cluster/node/stats/NodesStatsRequest.java | 3 +- .../node/stats/TransportNodesStatsAction.java | 3 +- .../stats/TransportClusterStatsAction.java | 1 + .../common/settings/ClusterSettings.java | 5 + .../main/java/org/opensearch/node/Node.java | 25 ++- .../node/NodeResourceUsageStats.java | 81 ++++++++ .../java/org/opensearch/node/NodeService.java | 9 +- .../node/NodesResourceUsageStats.java | 69 +++++++ .../node/ResourceUsageCollectorService.java | 160 +++++++++++++++ .../tracker/AbstractAverageUsageTracker.java | 100 +++++++++ .../tracker/AverageCpuUsageTracker.java | 38 ++++ .../tracker/AverageMemoryUsageTracker.java | 42 ++++ .../tracker/NodeResourceUsageTracker.java | 118 +++++++++++ .../tracker/ResourceTrackerSettings.java | 90 +++++++++ .../node/resource/tracker/package-info.java | 12 ++ .../cluster/node/stats/NodeStatsTests.java | 45 +++++ .../opensearch/cluster/DiskUsageTests.java | 6 + .../ResourceUsageCollectorServiceTests.java | 190 ++++++++++++++++++ .../tracker/AverageUsageTrackerTests.java | 99 +++++++++ .../NodeResourceUsageTrackerTests.java | 96 +++++++++ .../MockInternalClusterInfoService.java | 1 + .../opensearch/test/InternalTestCluster.java | 1 + 24 files changed, 1211 insertions(+), 7 deletions(-) create mode 100644 server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java create mode 100644 server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java create mode 100644 server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java create mode 100644 server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java create mode 100644 server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java create mode 100644 server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java create mode 100644 server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java create mode 100644 server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java create mode 100644 server/src/main/java/org/opensearch/node/resource/tracker/package-info.java create mode 100644 server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java create mode 100644 server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java create mode 100644 server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 644fb05900ceb..2211f59574718 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) +- [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index dd36b3b8db3ab..69efea186d927 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -56,6 +56,7 @@ import org.opensearch.monitor.os.OsStats; import org.opensearch.monitor.process.ProcessStats; import org.opensearch.node.AdaptiveSelectionStats; +import org.opensearch.node.NodesResourceUsageStats; import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; import org.opensearch.search.backpressure.stats.SearchBackpressureStats; @@ -142,6 +143,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private SearchPipelineStats searchPipelineStats; + @Nullable + private NodesResourceUsageStats resourceUsageStats; + public NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -198,6 +202,11 @@ public NodeStats(StreamInput in) throws IOException { } else { searchPipelineStats = null; } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { // make it 2.12 when we backport + resourceUsageStats = in.readOptionalWriteable(NodesResourceUsageStats::new); + } else { + resourceUsageStats = null; + } } public NodeStats( @@ -216,6 +225,7 @@ public NodeStats( @Nullable DiscoveryStats discoveryStats, @Nullable IngestStats ingestStats, @Nullable AdaptiveSelectionStats adaptiveSelectionStats, + @Nullable NodesResourceUsageStats resourceUsageStats, @Nullable ScriptCacheStats scriptCacheStats, @Nullable IndexingPressureStats indexingPressureStats, @Nullable ShardIndexingPressureStats shardIndexingPressureStats, @@ -241,6 +251,7 @@ public NodeStats( this.discoveryStats = discoveryStats; this.ingestStats = ingestStats; this.adaptiveSelectionStats = adaptiveSelectionStats; + this.resourceUsageStats = resourceUsageStats; this.scriptCacheStats = scriptCacheStats; this.indexingPressureStats = indexingPressureStats; this.shardIndexingPressureStats = shardIndexingPressureStats; @@ -344,6 +355,11 @@ public AdaptiveSelectionStats getAdaptiveSelectionStats() { return adaptiveSelectionStats; } + @Nullable + public NodesResourceUsageStats getResourceUsageStats() { + return resourceUsageStats; + } + @Nullable public ScriptCacheStats getScriptCacheStats() { return scriptCacheStats; @@ -430,6 +446,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_9_0)) { out.writeOptionalWriteable(searchPipelineStats); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { // make it 2.12 when we backport + out.writeOptionalWriteable(resourceUsageStats); + } } @Override @@ -520,7 +539,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (getSearchPipelineStats() != null) { getSearchPipelineStats().toXContent(builder, params); } - + if (getResourceUsageStats() != null) { + getResourceUsageStats().toXContent(builder, params); + } return builder; } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index b0caa469033eb..99c9fb2d1e26a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java
@@ -213,7 +213,8 @@ public enum Metric {
         WEIGHTED_ROUTING_STATS("weighted_routing"),
         FILE_CACHE_STATS("file_cache"),
         TASK_CANCELLATION("task_cancellation"),
-        SEARCH_PIPELINE("search_pipeline");
+        SEARCH_PIPELINE("search_pipeline"),
+        RESOURCE_USAGE_STATS("resource_usage_stats");

         private String metricName;

diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java
index 615abbaef845d..204157236a282 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java
@@ -124,7 +124,8 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) {
             NodesStatsRequest.Metric.WEIGHTED_ROUTING_STATS.containedIn(metrics),
             NodesStatsRequest.Metric.FILE_CACHE_STATS.containedIn(metrics),
             NodesStatsRequest.Metric.TASK_CANCELLATION.containedIn(metrics),
-            NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics)
+            NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics),
+            NodesStatsRequest.Metric.RESOURCE_USAGE_STATS.containedIn(metrics)
         );
     }

diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java
index 18098bc31432f..d8323e209be23 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java
@@ -168,6 +168,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq
             false,
             false,
             false,
+            false,
             false
         );
         List<ShardStats> shardsStats = new ArrayList<>();
diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
index ad2b89aa3948d..bb0910c4e5f9c 100644
--- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
@@ -130,6 +130,7 @@
 import org.opensearch.node.Node.DiscoverySettings;
 import org.opensearch.node.NodeRoleSettings;
 import org.opensearch.node.remotestore.RemoteStoreNodeService;
+import org.opensearch.node.resource.tracker.ResourceTrackerSettings;
 import org.opensearch.persistent.PersistentTasksClusterService;
 import org.opensearch.persistent.decider.EnableAssignmentDecider;
 import org.opensearch.plugins.PluginsService;
@@ -655,6 +656,10 @@ public void apply(Settings value, Settings current, Settings previous) {
             SegmentReplicationPressureService.MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING,
             SegmentReplicationPressureService.MAX_ALLOWED_STALE_SHARDS,

+            // Settings related to resource trackers
+            ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING,
+            ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING,
+
             // Settings related to Searchable Snapshots
             Node.NODE_SEARCH_CACHE_SIZE_SETTING,
             FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING,
diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java
index 5b3b064a47c66..c456f01135dee 100644
--- a/server/src/main/java/org/opensearch/node/Node.java
+++
b/server/src/main/java/org/opensearch/node/Node.java @@ -167,6 +167,7 @@ import org.opensearch.monitor.fs.FsProbe; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.node.resource.tracker.NodeResourceUsageTracker; import org.opensearch.persistent.PersistentTasksClusterService; import org.opensearch.persistent.PersistentTasksExecutor; import org.opensearch.persistent.PersistentTasksExecutorRegistry; @@ -805,7 +806,6 @@ protected Node( remoteStoreStatsTrackerFactory, recoverySettings ); - final AliasValidator aliasValidator = new AliasValidator(); final ShardLimitValidator shardLimitValidator = new ShardLimitValidator(settings, clusterService, systemIndices); @@ -1070,6 +1070,16 @@ protected Node( transportService.getTaskManager(), taskCancellationMonitoringSettings ); + final NodeResourceUsageTracker nodeResourceUsageTracker = new NodeResourceUsageTracker( + threadPool, + settings, + clusterService.getClusterSettings() + ); + final ResourceUsageCollectorService resourceUsageCollectorService = new ResourceUsageCollectorService( + nodeResourceUsageTracker, + clusterService, + threadPool + ); this.nodeService = new NodeService( settings, threadPool, @@ -1091,7 +1101,8 @@ protected Node( searchBackpressureService, searchPipelineService, fileCache, - taskCancellationMonitoringService + taskCancellationMonitoringService, + resourceUsageCollectorService ); final SearchService searchService = newSearchService( @@ -1212,6 +1223,8 @@ protected Node( b.bind(RerouteService.class).toInstance(rerouteService); b.bind(ShardLimitValidator.class).toInstance(shardLimitValidator); b.bind(FsHealthService.class).toInstance(fsHealthService); + b.bind(NodeResourceUsageTracker.class).toInstance(nodeResourceUsageTracker); + b.bind(ResourceUsageCollectorService.class).toInstance(resourceUsageCollectorService); b.bind(SystemIndices.class).toInstance(systemIndices); b.bind(IdentityService.class).toInstance(identityService); b.bind(Tracer.class).toInstance(tracer); @@ -1328,6 +1341,8 @@ public Node start() throws NodeValidationException { injector.getInstance(RepositoriesService.class).start(); injector.getInstance(SearchService.class).start(); injector.getInstance(FsHealthService.class).start(); + injector.getInstance(NodeResourceUsageTracker.class).start(); + injector.getInstance(ResourceUsageCollectorService.class).start(); nodeService.getMonitorService().start(); nodeService.getSearchBackpressureService().start(); nodeService.getTaskCancellationMonitoringService().start(); @@ -1490,6 +1505,8 @@ private Node stop() { injector.getInstance(ClusterService.class).stop(); injector.getInstance(NodeConnectionsService.class).stop(); injector.getInstance(FsHealthService.class).stop(); + injector.getInstance(NodeResourceUsageTracker.class).stop(); + injector.getInstance(ResourceUsageCollectorService.class).stop(); nodeService.getMonitorService().stop(); nodeService.getSearchBackpressureService().stop(); injector.getInstance(GatewayService.class).stop(); @@ -1553,6 +1570,10 @@ public synchronized void close() throws IOException { toClose.add(nodeService.getSearchBackpressureService()); toClose.add(() -> stopWatch.stop().start("fsHealth")); toClose.add(injector.getInstance(FsHealthService.class)); + toClose.add(() -> stopWatch.stop().start("resource_usage_tracker")); + toClose.add(injector.getInstance(NodeResourceUsageTracker.class)); + toClose.add(() -> stopWatch.stop().start("resource_usage_collector")); + 
toClose.add(injector.getInstance(ResourceUsageCollectorService.class)); toClose.add(() -> stopWatch.stop().start("gateway")); toClose.add(injector.getInstance(GatewayService.class)); toClose.add(() -> stopWatch.stop().start("search")); diff --git a/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java b/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java new file mode 100644 index 0000000000000..6ef66d4ac1914 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; + +/** + * This represents the resource usage stats of a node along with the timestamp at which the stats object was created + * in the respective node + */ +public class NodeResourceUsageStats implements Writeable { + final String nodeId; + long timestamp; + double cpuUtilizationPercent; + double memoryUtilizationPercent; + + public NodeResourceUsageStats(String nodeId, long timestamp, double memoryUtilizationPercent, double cpuUtilizationPercent) { + this.nodeId = nodeId; + this.timestamp = timestamp; + this.cpuUtilizationPercent = cpuUtilizationPercent; + this.memoryUtilizationPercent = memoryUtilizationPercent; + } + + public NodeResourceUsageStats(StreamInput in) throws IOException { + this.nodeId = in.readString(); + this.timestamp = in.readLong(); + this.cpuUtilizationPercent = in.readDouble(); + this.memoryUtilizationPercent = in.readDouble(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(this.nodeId); + out.writeLong(this.timestamp); + out.writeDouble(this.cpuUtilizationPercent); + out.writeDouble(this.memoryUtilizationPercent); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("NodeResourceUsageStats["); + sb.append(nodeId).append("]("); + sb.append("Timestamp: ").append(timestamp); + sb.append(", CPU utilization percent: ").append(String.format(Locale.ROOT, "%.1f", cpuUtilizationPercent)); + sb.append(", Memory utilization percent: ").append(String.format(Locale.ROOT, "%.1f", memoryUtilizationPercent)); + sb.append(")"); + return sb.toString(); + } + + NodeResourceUsageStats(NodeResourceUsageStats nodeResourceUsageStats) { + this( + nodeResourceUsageStats.nodeId, + nodeResourceUsageStats.timestamp, + nodeResourceUsageStats.memoryUtilizationPercent, + nodeResourceUsageStats.cpuUtilizationPercent + ); + } + + public double getMemoryUtilizationPercent() { + return memoryUtilizationPercent; + } + + public double getCpuUtilizationPercent() { + return cpuUtilizationPercent; + } + + public long getTimestamp() { + return timestamp; + } +} diff --git a/server/src/main/java/org/opensearch/node/NodeService.java b/server/src/main/java/org/opensearch/node/NodeService.java index 2688b894cb9a7..9bb07080fa717 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -83,6 +83,7 @@ public class NodeService implements Closeable { private final ScriptService scriptService; private final HttpServerTransport httpServerTransport; 
private final ResponseCollectorService responseCollectorService; + private final ResourceUsageCollectorService resourceUsageCollectorService; private final SearchTransportService searchTransportService; private final IndexingPressureService indexingPressureService; private final AggregationUsageService aggregationUsageService; @@ -114,7 +115,8 @@ public class NodeService implements Closeable { SearchBackpressureService searchBackpressureService, SearchPipelineService searchPipelineService, FileCache fileCache, - TaskCancellationMonitoringService taskCancellationMonitoringService + TaskCancellationMonitoringService taskCancellationMonitoringService, + ResourceUsageCollectorService resourceUsageCollectorService ) { this.settings = settings; this.threadPool = threadPool; @@ -137,6 +139,7 @@ public class NodeService implements Closeable { this.clusterService = clusterService; this.fileCache = fileCache; this.taskCancellationMonitoringService = taskCancellationMonitoringService; + this.resourceUsageCollectorService = resourceUsageCollectorService; clusterService.addStateApplier(ingestService); clusterService.addStateApplier(searchPipelineService); } @@ -217,7 +220,8 @@ public NodeStats stats( boolean weightedRoutingStats, boolean fileCacheStats, boolean taskCancellation, - boolean searchPipelineStats + boolean searchPipelineStats, + boolean resourceUsageStats ) { // for indices stats we want to include previous allocated shards stats as well (it will // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) @@ -237,6 +241,7 @@ public NodeStats stats( discoveryStats ? discovery.stats() : null, ingest ? ingestService.stats() : null, adaptiveSelection ? responseCollectorService.getAdaptiveStats(searchTransportService.getPendingSearchRequests()) : null, + resourceUsageStats ? resourceUsageCollectorService.stats() : null, scriptCache ? scriptService.cacheStats() : null, indexingPressure ? this.indexingPressureService.nodeStats() : null, shardIndexingPressure ? this.indexingPressureService.shardStats(indices) : null, diff --git a/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java b/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java new file mode 100644 index 0000000000000..3dff9a27f71a8 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Locale; +import java.util.Map; + +/** + * This class represents resource usage stats such as CPU, Memory and IO resource usage of each node along with the + * timestamp of the stats recorded. + */ +public class NodesResourceUsageStats implements Writeable, ToXContentFragment { + + // Map of node id to resource usage stats of the corresponding node. 
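+    // Illustrative entry (hypothetical node id and values): "n1" -> stats with cpu=12.3, memory=45.6,
+    // which toXContent below renders under the "resource_usage_stats" object.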
+    private final Map<String, NodeResourceUsageStats> nodeIdToResourceUsageStatsMap;
+
+    public NodesResourceUsageStats(Map<String, NodeResourceUsageStats> nodeIdToResourceUsageStatsMap) {
+        this.nodeIdToResourceUsageStatsMap = nodeIdToResourceUsageStatsMap;
+    }
+
+    public NodesResourceUsageStats(StreamInput in) throws IOException {
+        this.nodeIdToResourceUsageStatsMap = in.readMap(StreamInput::readString, NodeResourceUsageStats::new);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeMap(this.nodeIdToResourceUsageStatsMap, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream));
+    }
+
+    /**
+     * Returns map of node id to resource usage stats of the corresponding node.
+     */
+    public Map<String, NodeResourceUsageStats> getNodeIdToResourceUsageStatsMap() {
+        return nodeIdToResourceUsageStatsMap;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject("resource_usage_stats");
+        for (String nodeId : nodeIdToResourceUsageStatsMap.keySet()) {
+            builder.startObject(nodeId);
+            NodeResourceUsageStats resourceUsageStats = nodeIdToResourceUsageStatsMap.get(nodeId);
+            if (resourceUsageStats != null) {
+                builder.field("timestamp", resourceUsageStats.timestamp);
+                builder.field("cpu_utilization_percent", String.format(Locale.ROOT, "%.1f", resourceUsageStats.cpuUtilizationPercent));
+                builder.field(
+                    "memory_utilization_percent",
+                    String.format(Locale.ROOT, "%.1f", resourceUsageStats.memoryUtilizationPercent)
+                );
+            }
+            builder.endObject();
+        }
+        builder.endObject();
+        return builder;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java b/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java
new file mode 100644
index 0000000000000..f1c763e09f147
--- /dev/null
+++ b/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java
@@ -0,0 +1,160 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.node;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.cluster.ClusterChangedEvent;
+import org.opensearch.cluster.ClusterStateListener;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.util.concurrent.ConcurrentCollections;
+import org.opensearch.node.resource.tracker.NodeResourceUsageTracker;
+import org.opensearch.threadpool.Scheduler;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * This collects node level resource usage statistics such as cpu, memory, IO of each node and makes it available for
+ * coordinator node to aid in throttling, ranking etc
+ */
+public class ResourceUsageCollectorService extends AbstractLifecycleComponent implements ClusterStateListener {
+
+    /**
+     * This refresh interval denotes the polling interval of ResourceUsageCollectorService to refresh the resource usage
+     * stats from local node
+     */
+    private static long REFRESH_INTERVAL_IN_MILLIS = 1000;
+
+    private static final Logger logger = LogManager.getLogger(ResourceUsageCollectorService.class);
+    private final ConcurrentMap<String, NodeResourceUsageStats> nodeIdToResourceUsageStats = ConcurrentCollections.newConcurrentMap();
+
+    private ThreadPool threadPool;
+    private volatile Scheduler.Cancellable scheduledFuture;
+
+    private NodeResourceUsageTracker nodeResourceUsageTracker;
+    private ClusterService clusterService;
+
+    public ResourceUsageCollectorService(
+        NodeResourceUsageTracker nodeResourceUsageTracker,
+        ClusterService clusterService,
+        ThreadPool threadPool
+    ) {
+        this.threadPool = threadPool;
+        this.nodeResourceUsageTracker = nodeResourceUsageTracker;
+        this.clusterService = clusterService;
+        clusterService.addListener(this);
+    }
+
+    @Override
+    public void clusterChanged(ClusterChangedEvent event) {
+        if (event.nodesRemoved()) {
+            for (DiscoveryNode removedNode : event.nodesDelta().removedNodes()) {
+                removeNodeResourceUsageStats(removedNode.getId());
+            }
+        }
+    }
+
+    void removeNodeResourceUsageStats(String nodeId) {
+        nodeIdToResourceUsageStats.remove(nodeId);
+    }
+
+    /**
+     * Collect node resource usage stats along with the timestamp
+     */
+    public void collectNodeResourceUsageStats(
+        String nodeId,
+        long timestamp,
+        double memoryUtilizationPercent,
+        double cpuUtilizationPercent
+    ) {
+        nodeIdToResourceUsageStats.compute(nodeId, (id, resourceUsageStats) -> {
+            if (resourceUsageStats == null) {
+                return new NodeResourceUsageStats(nodeId, timestamp, memoryUtilizationPercent, cpuUtilizationPercent);
+            } else {
+                resourceUsageStats.cpuUtilizationPercent = cpuUtilizationPercent;
+                resourceUsageStats.memoryUtilizationPercent = memoryUtilizationPercent;
+                resourceUsageStats.timestamp = timestamp;
+                return resourceUsageStats;
+            }
+        });
+    }
+
+    /**
+     * Get all node resource usage statistics which will be used for node stats
+     */
+    public Map<String, NodeResourceUsageStats> getAllNodeStatistics() {
+        Map<String, NodeResourceUsageStats> nodeStats = new HashMap<>(nodeIdToResourceUsageStats.size());
+        nodeIdToResourceUsageStats.forEach((nodeId, resourceUsageStats) -> {
+            nodeStats.put(nodeId, new NodeResourceUsageStats(resourceUsageStats));
+        });
+        return nodeStats;
+    }
+
+    /**
+     * Optionally return a {@code NodeResourceUsageStats} for the given nodeid, if
+     * resource usage stats information exists for the given node. Returns an empty
+     * {@code Optional} if the node was not found.
+     */
+    public Optional<NodeResourceUsageStats> getNodeStatistics(final String nodeId) {
+        return Optional.ofNullable(nodeIdToResourceUsageStats.get(nodeId))
+            .map(resourceUsageStats -> new NodeResourceUsageStats(resourceUsageStats));
+    }
+
+    /**
+     * Returns collected resource usage statistics of all nodes
+     */
+    public NodesResourceUsageStats stats() {
+        return new NodesResourceUsageStats(getAllNodeStatistics());
+    }
+
+    /**
+     * Fetch local node resource usage statistics and add it to store along with the current timestamp
+     */
+    private void collectLocalNodeResourceUsageStats() {
+        if (nodeResourceUsageTracker.isReady() && clusterService.state() != null) {
+            collectNodeResourceUsageStats(
+                clusterService.state().nodes().getLocalNodeId(),
+                System.currentTimeMillis(),
+                nodeResourceUsageTracker.getMemoryUtilizationPercent(),
+                nodeResourceUsageTracker.getCpuUtilizationPercent()
+            );
+        }
+    }
+
+    @Override
+    protected void doStart() {
+        /**
+         * Fetch local node resource usage statistics every second
+         */
+        scheduledFuture = threadPool.scheduleWithFixedDelay(() -> {
+            try {
+                collectLocalNodeResourceUsageStats();
+            } catch (Exception e) {
+                logger.warn("failure in ResourceUsageCollectorService", e);
+            }
+        }, new TimeValue(REFRESH_INTERVAL_IN_MILLIS), ThreadPool.Names.GENERIC);
+    }
+
+    @Override
+    protected void doStop() {
+        if (scheduledFuture != null) {
+            scheduledFuture.cancel();
+        }
+    }
+
+    @Override
+    protected void doClose() {}
+}
diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java
new file mode 100644
index 0000000000000..f83a1b7f9fc05
--- /dev/null
+++ b/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java
@@ -0,0 +1,100 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.node.resource.tracker;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.util.MovingAverage;
+import org.opensearch.threadpool.Scheduler;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * Base class for sliding window resource usage trackers
+ */
+public abstract class AbstractAverageUsageTracker extends AbstractLifecycleComponent {
+    private static final Logger LOGGER = LogManager.getLogger(AbstractAverageUsageTracker.class);
+
+    private final ThreadPool threadPool;
+    private final TimeValue pollingInterval;
+    private TimeValue windowDuration;
+    private final AtomicReference<MovingAverage> observations = new AtomicReference<>();
+
+    private volatile Scheduler.Cancellable scheduledFuture;
+
+    public AbstractAverageUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) {
+        this.threadPool = threadPool;
+        this.pollingInterval = pollingInterval;
+        this.windowDuration = windowDuration;
+        this.setWindowSize(windowDuration);
+    }
+
+    public abstract long getUsage();
+
+    /**
+     * Returns the moving average of the datapoints
+     */
+    public double getAverage() {
+        return observations.get().getAverage();
+    }
+
+    /**
+     * Checks if we have datapoints more than or equal to the window size
+     */
+    public boolean isReady() {
+        return observations.get().isReady();
+    }
+
+    /**
+     * Creates a new instance of MovingAverage with a new window size based on WindowDuration
+     */
+    public void setWindowSize(TimeValue windowDuration) {
+        this.windowDuration = windowDuration;
+        int windowSize = (int) (windowDuration.nanos() / pollingInterval.nanos());
+        LOGGER.debug("updated window size: {}", windowSize);
+        observations.set(new MovingAverage(windowSize));
+    }
+
+    public TimeValue getPollingInterval() {
+        return pollingInterval;
+    }
+
+    public TimeValue getWindowDuration() {
+        return windowDuration;
+    }
+
+    public long getWindowSize() {
+        return observations.get().getCount();
+    }
+
+    public void recordUsage(long usage) {
+        observations.get().record(usage);
+    }
+
+    @Override
+    protected void doStart() {
+        scheduledFuture = threadPool.scheduleWithFixedDelay(() -> {
+            long usage = getUsage();
+            recordUsage(usage);
+        }, pollingInterval, ThreadPool.Names.GENERIC);
+    }
+
+    @Override
+    protected void doStop() {
+        if (scheduledFuture != null) {
+            scheduledFuture.cancel();
+        }
+    }
+
+    @Override
+    protected void doClose() {}
+}
diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java
new file mode 100644
index 0000000000000..160d385762eb0
--- /dev/null
+++ b/server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java
@@ -0,0 +1,38 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.monitor.process.ProcessProbe; +import org.opensearch.threadpool.ThreadPool; + +/** + * AverageCpuUsageTracker tracks the average CPU usage by polling the CPU usage every (pollingInterval) + * and keeping track of the rolling average over a defined time window (windowDuration). + */ +public class AverageCpuUsageTracker extends AbstractAverageUsageTracker { + private static final Logger LOGGER = LogManager.getLogger(AverageCpuUsageTracker.class); + + public AverageCpuUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) { + super(threadPool, pollingInterval, windowDuration); + } + + /** + * Returns the process CPU usage in percent + */ + @Override + public long getUsage() { + long usage = ProcessProbe.getInstance().getProcessCpuPercent(); + LOGGER.debug("Recording cpu usage: {}%", usage); + return usage; + } + +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java new file mode 100644 index 0000000000000..c1d1c83656859 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.threadpool.ThreadPool; + +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; + +/** + * AverageMemoryUsageTracker tracks the average JVM usage by polling the JVM usage every (pollingInterval) + * and keeping track of the rolling average over a defined time window (windowDuration). + */ +public class AverageMemoryUsageTracker extends AbstractAverageUsageTracker { + + private static final Logger LOGGER = LogManager.getLogger(AverageMemoryUsageTracker.class); + + private static final MemoryMXBean MEMORY_MX_BEAN = ManagementFactory.getMemoryMXBean(); + + public AverageMemoryUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) { + super(threadPool, pollingInterval, windowDuration); + } + + /** + * Get current memory usage percentage calculated against max heap memory + */ + @Override + public long getUsage() { + long usage = MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed() * 100 / MEMORY_MX_BEAN.getHeapMemoryUsage().getMax(); + LOGGER.debug("Recording memory usage: {}%", usage); + return usage; + } +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java new file mode 100644 index 0000000000000..cf5f38c1b004c --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java @@ -0,0 +1,118 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.threadpool.ThreadPool; + +/** + * This tracks the usage of node resources such as CPU, IO and memory + */ +public class NodeResourceUsageTracker extends AbstractLifecycleComponent { + private ThreadPool threadPool; + private final ClusterSettings clusterSettings; + private AverageCpuUsageTracker cpuUsageTracker; + private AverageMemoryUsageTracker memoryUsageTracker; + + private ResourceTrackerSettings resourceTrackerSettings; + + public NodeResourceUsageTracker(ThreadPool threadPool, Settings settings, ClusterSettings clusterSettings) { + this.threadPool = threadPool; + this.clusterSettings = clusterSettings; + this.resourceTrackerSettings = new ResourceTrackerSettings(settings); + initialize(); + } + + /** + * Return CPU utilization average if we have enough datapoints, otherwise return 0 + */ + public double getCpuUtilizationPercent() { + if (cpuUsageTracker.isReady()) { + return cpuUsageTracker.getAverage(); + } + return 0.0; + } + + /** + * Return memory utilization average if we have enough datapoints, otherwise return 0 + */ + public double getMemoryUtilizationPercent() { + if (memoryUsageTracker.isReady()) { + return memoryUsageTracker.getAverage(); + } + return 0.0; + } + + /** + * Checks if all of the resource usage trackers are ready + */ + public boolean isReady() { + return memoryUsageTracker.isReady() && cpuUsageTracker.isReady(); + } + + void initialize() { + cpuUsageTracker = new AverageCpuUsageTracker( + threadPool, + resourceTrackerSettings.getCpuPollingInterval(), + resourceTrackerSettings.getCpuWindowDuration() + ); + clusterSettings.addSettingsUpdateConsumer( + ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING, + this::setCpuWindowDuration + ); + + memoryUsageTracker = new AverageMemoryUsageTracker( + threadPool, + resourceTrackerSettings.getMemoryPollingInterval(), + resourceTrackerSettings.getMemoryWindowDuration() + ); + clusterSettings.addSettingsUpdateConsumer( + ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING, + this::setMemoryWindowDuration + ); + } + + private void setMemoryWindowDuration(TimeValue windowDuration) { + memoryUsageTracker.setWindowSize(windowDuration); + resourceTrackerSettings.setMemoryWindowDuration(windowDuration); + } + + private void setCpuWindowDuration(TimeValue windowDuration) { + cpuUsageTracker.setWindowSize(windowDuration); + resourceTrackerSettings.setCpuWindowDuration(windowDuration); + } + + /** + * Visible for testing + */ + ResourceTrackerSettings getResourceTrackerSettings() { + return resourceTrackerSettings; + } + + @Override + protected void doStart() { + cpuUsageTracker.doStart(); + memoryUsageTracker.doStart(); + } + + @Override + protected void doStop() { + cpuUsageTracker.doStop(); + memoryUsageTracker.doStop(); + } + + @Override + protected void doClose() { + cpuUsageTracker.doClose(); + memoryUsageTracker.doClose(); + } +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java b/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java new file mode 100644 index 0000000000000..f81b008ba7e8b --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java @@ -0,0 +1,90 @@ +/* + 
* SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.node.resource.tracker;
+
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+
+/**
+ * Settings related to resource usage trackers such as polling interval, window duration etc
+ */
+public class ResourceTrackerSettings {
+
+    private static class Defaults {
+        /**
+         * This is the default polling interval of usage trackers to get the resource utilization data
+         */
+        private static final long POLLING_INTERVAL_IN_MILLIS = 500;
+        /**
+         * This is the default window duration on which the average resource utilization values will be calculated
+         */
+        private static final long WINDOW_DURATION_IN_SECONDS = 30;
+    }
+
+    public static final Setting<TimeValue> GLOBAL_CPU_USAGE_AC_POLLING_INTERVAL_SETTING = Setting.positiveTimeSetting(
+        "node.resource.tracker.global_cpu_usage.polling_interval",
+        TimeValue.timeValueMillis(Defaults.POLLING_INTERVAL_IN_MILLIS),
+        Setting.Property.NodeScope
+    );
+    public static final Setting<TimeValue> GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING = Setting.positiveTimeSetting(
+        "node.resource.tracker.global_cpu_usage.window_duration",
+        TimeValue.timeValueSeconds(Defaults.WINDOW_DURATION_IN_SECONDS),
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+
+    public static final Setting<TimeValue> GLOBAL_JVM_USAGE_AC_POLLING_INTERVAL_SETTING = Setting.positiveTimeSetting(
+        "node.resource.tracker.global_jvmmp.polling_interval",
+        TimeValue.timeValueMillis(Defaults.POLLING_INTERVAL_IN_MILLIS),
+        Setting.Property.NodeScope
+    );
+
+    public static final Setting<TimeValue> GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING = Setting.positiveTimeSetting(
+        "node.resource.tracker.global_jvmmp.window_duration",
+        TimeValue.timeValueSeconds(Defaults.WINDOW_DURATION_IN_SECONDS),
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+    private volatile TimeValue cpuWindowDuration;
+    private volatile TimeValue cpuPollingInterval;
+    private volatile TimeValue memoryWindowDuration;
+    private volatile TimeValue memoryPollingInterval;
+
+    public ResourceTrackerSettings(Settings settings) {
+        this.cpuPollingInterval = GLOBAL_CPU_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings);
+        this.cpuWindowDuration = GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.get(settings);
+        this.memoryPollingInterval = GLOBAL_JVM_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings);
+        this.memoryWindowDuration = GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.get(settings);
+    }
+
+    public TimeValue getCpuWindowDuration() {
+        return this.cpuWindowDuration;
+    }
+
+    public TimeValue getCpuPollingInterval() {
+        return cpuPollingInterval;
+    }
+
+    public TimeValue getMemoryPollingInterval() {
+        return memoryPollingInterval;
+    }
+
+    public TimeValue getMemoryWindowDuration() {
+        return memoryWindowDuration;
+    }
+
+    public void setCpuWindowDuration(TimeValue cpuWindowDuration) {
+        this.cpuWindowDuration = cpuWindowDuration;
+    }
+
+    public void setMemoryWindowDuration(TimeValue memoryWindowDuration) {
+        this.memoryWindowDuration = memoryWindowDuration;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/package-info.java b/server/src/main/java/org/opensearch/node/resource/tracker/package-info.java
new file mode 100644
index 0000000000000..aace2a019973e
--- /dev/null
+++ b/server/src/main/java/org/opensearch/node/resource/tracker/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Node level resource usage stats tracker package
+ */
+package org.opensearch.node.resource.tracker;
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java
index e0b35c69cc3c0..7a1b6f113d0e8 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java
@@ -59,6 +59,8 @@
 import org.opensearch.monitor.os.OsStats;
 import org.opensearch.monitor.process.ProcessStats;
 import org.opensearch.node.AdaptiveSelectionStats;
+import org.opensearch.node.NodeResourceUsageStats;
+import org.opensearch.node.NodesResourceUsageStats;
 import org.opensearch.node.ResponseCollectorService;
 import org.opensearch.script.ScriptCacheStats;
 import org.opensearch.script.ScriptStats;
@@ -393,6 +395,24 @@ public void testSerialization() throws IOException {
                     assertEquals(aStats.responseTime, bStats.responseTime, 0.01);
                 });
             }
+            NodesResourceUsageStats resourceUsageStats = nodeStats.getResourceUsageStats();
+            NodesResourceUsageStats deserializedResourceUsageStats = deserializedNodeStats.getResourceUsageStats();
+            if (resourceUsageStats == null) {
+                assertNull(deserializedResourceUsageStats);
+            } else {
+                resourceUsageStats.getNodeIdToResourceUsageStatsMap().forEach((k, v) -> {
+                    NodeResourceUsageStats aResourceUsageStats = resourceUsageStats.getNodeIdToResourceUsageStatsMap().get(k);
+                    NodeResourceUsageStats bResourceUsageStats = deserializedResourceUsageStats.getNodeIdToResourceUsageStatsMap()
+                        .get(k);
+                    assertEquals(
+                        aResourceUsageStats.getMemoryUtilizationPercent(),
+                        bResourceUsageStats.getMemoryUtilizationPercent(),
+                        0.0
+                    );
+                    assertEquals(aResourceUsageStats.getCpuUtilizationPercent(), bResourceUsageStats.getCpuUtilizationPercent(), 0.0);
+                    assertEquals(aResourceUsageStats.getTimestamp(), bResourceUsageStats.getTimestamp());
+                });
+            }
             ScriptCacheStats scriptCacheStats = nodeStats.getScriptCacheStats();
             ScriptCacheStats deserializedScriptCacheStats = deserializedNodeStats.getScriptCacheStats();
             if (scriptCacheStats == null) {
@@ -756,6 +776,30 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) {
             }
             adaptiveSelectionStats = new AdaptiveSelectionStats(nodeConnections, nodeStats);
         }
+        NodesResourceUsageStats nodesResourceUsageStats = null;
+        if (frequently()) {
+            int numNodes = randomIntBetween(0, 10);
+            Map<String, Long> nodeConnections = new HashMap<>();
+            Map<String, NodeResourceUsageStats> resourceUsageStatsMap = new HashMap<>();
+            for (int i = 0; i < numNodes; i++) {
+                String nodeId = randomAlphaOfLengthBetween(3, 10);
+                // add outgoing connection info
+                if (frequently()) {
+                    nodeConnections.put(nodeId, randomLongBetween(0, 100));
+                }
+                // add node calculations
+                if (frequently()) {
+                    NodeResourceUsageStats stats = new NodeResourceUsageStats(
+                        nodeId,
+                        System.currentTimeMillis(),
+                        randomDoubleBetween(1.0, 100.0, true),
+                        randomDoubleBetween(1.0, 100.0, true)
+                    );
+                    resourceUsageStatsMap.put(nodeId, stats);
+                }
+            }
+            nodesResourceUsageStats = new NodesResourceUsageStats(resourceUsageStatsMap);
+        }
         ClusterManagerThrottlingStats clusterManagerThrottlingStats =
null; if (frequently()) { clusterManagerThrottlingStats = new ClusterManagerThrottlingStats(); @@ -787,6 +831,7 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { discoveryStats, ingestStats, adaptiveSelectionStats, + nodesResourceUsageStats, scriptCacheStats, null, null, diff --git a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java index 8ba965b3df1ab..64949cf861f70 100644 --- a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java @@ -190,6 +190,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -216,6 +217,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -242,6 +244,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ) ); @@ -299,6 +302,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -325,6 +329,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -351,6 +356,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ) ); diff --git a/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java b/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java new file mode 100644 index 0000000000000..b2fa884afab69 --- /dev/null +++ b/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java @@ -0,0 +1,190 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node; + +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.node.resource.tracker.NodeResourceUsageTracker; +import org.opensearch.node.resource.tracker.ResourceTrackerSettings; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.hamcrest.Matchers.greaterThan; + +/** + * Tests for ResourceUsageCollectorService where we test collect method, get method and whether schedulers + * are working as expected + */ +public class ResourceUsageCollectorServiceTests extends OpenSearchSingleNodeTestCase { + + private ClusterService clusterService; + private ResourceUsageCollectorService collector; + private ThreadPool threadpool; + NodeResourceUsageTracker tracker; + + @Before + public void setUp() throws Exception { + super.setUp(); + + threadpool = new TestThreadPool("resource_usage_collector_tests"); + + clusterService = createClusterService(threadpool); + + Settings settings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), new TimeValue(500, TimeUnit.MILLISECONDS)) + .build(); + tracker = new NodeResourceUsageTracker( + threadpool, + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + collector = new ResourceUsageCollectorService(tracker, clusterService, threadpool); + tracker.start(); + collector.start(); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + threadpool.shutdownNow(); + clusterService.close(); + collector.stop(); + tracker.stop(); + collector.close(); + tracker.close(); + } + + public void testResourceUsageStats() { + collector.collectNodeResourceUsageStats("node1", System.currentTimeMillis(), 97, 99); + Map<String, NodeResourceUsageStats> nodeStats = collector.getAllNodeStatistics(); + assertTrue(nodeStats.containsKey("node1")); + assertEquals(99.0, nodeStats.get("node1").cpuUtilizationPercent, 0.0); + assertEquals(97.0, nodeStats.get("node1").memoryUtilizationPercent, 0.0); + + Optional<NodeResourceUsageStats> nodeResourceUsageStatsOptional = collector.getNodeStatistics("node1"); + + assertNotNull(nodeResourceUsageStatsOptional.get()); + assertEquals(99.0, nodeResourceUsageStatsOptional.get().cpuUtilizationPercent, 0.0); + assertEquals(97.0, nodeResourceUsageStatsOptional.get().memoryUtilizationPercent, 0.0); + + nodeResourceUsageStatsOptional = collector.getNodeStatistics("node2"); + assertTrue(nodeResourceUsageStatsOptional.isEmpty()); + } + + public void testScheduler() throws Exception { + /** + * Wait for cluster state to be ready so that localNode().getId() is ready and we add the values to the map + */ + assertBusy(() -> assertTrue(collector.getNodeStatistics(clusterService.localNode().getId()).isPresent()), 1,
TimeUnit.MINUTES); + assertTrue(collector.getNodeStatistics(clusterService.localNode().getId()).isPresent()); + /** + * Wait for memory utilization to be reported greater than 0 + */ + assertBusy( + () -> assertThat( + collector.getNodeStatistics(clusterService.localNode().getId()).get().getMemoryUtilizationPercent(), + greaterThan(0.0) + ), + 5, + TimeUnit.SECONDS + ); + assertTrue(collector.getNodeStatistics("Invalid").isEmpty()); + } + + /* + * Test that concurrently adding values and removing nodes does not cause exceptions + */ + public void testConcurrentAddingAndRemovingNodes() throws Exception { + String[] nodes = new String[] { "a", "b", "c", "d" }; + + final CountDownLatch latch = new CountDownLatch(5); + + Runnable f = () -> { + latch.countDown(); + try { + latch.await(); + } catch (InterruptedException e) { + fail("should not be interrupted"); + } + for (int i = 0; i < randomIntBetween(100, 200); i++) { + if (randomBoolean()) { + collector.removeNodeResourceUsageStats(randomFrom(nodes)); + } + collector.collectNodeResourceUsageStats( + randomFrom(nodes), + System.currentTimeMillis(), + randomIntBetween(1, 100), + randomIntBetween(1, 100) + ); + } + }; + + Thread t1 = new Thread(f); + Thread t2 = new Thread(f); + Thread t3 = new Thread(f); + Thread t4 = new Thread(f); + + t1.start(); + t2.start(); + t3.start(); + t4.start(); + latch.countDown(); + t1.join(); + t2.join(); + t3.join(); + t4.join(); + + final Map<String, NodeResourceUsageStats> nodeStats = collector.getAllNodeStatistics(); + for (String nodeId : nodes) { + if (nodeStats.containsKey(nodeId)) { + assertThat(nodeStats.get(nodeId).memoryUtilizationPercent, greaterThan(0.0)); + assertThat(nodeStats.get(nodeId).cpuUtilizationPercent, greaterThan(0.0)); + } + } + } + + public void testNodeRemoval() { + collector.collectNodeResourceUsageStats("node1", System.currentTimeMillis(), randomIntBetween(1, 100), randomIntBetween(1, 100)); + collector.collectNodeResourceUsageStats("node2", System.currentTimeMillis(), randomIntBetween(1, 100), randomIntBetween(1, 100)); + + ClusterState previousState = ClusterState.builder(new ClusterName("cluster")) + .nodes( + DiscoveryNodes.builder() + .add(DiscoveryNode.createLocal(Settings.EMPTY, new TransportAddress(TransportAddress.META_ADDRESS, 9200), "node1")) + .add(DiscoveryNode.createLocal(Settings.EMPTY, new TransportAddress(TransportAddress.META_ADDRESS, 9201), "node2")) + ) + .build(); + ClusterState newState = ClusterState.builder(previousState) + .nodes(DiscoveryNodes.builder(previousState.nodes()).remove("node2")) + .build(); + ClusterChangedEvent event = new ClusterChangedEvent("test", newState, previousState); + + collector.clusterChanged(event); + final Map<String, NodeResourceUsageStats> nodeStats = collector.getAllNodeStatistics(); + assertTrue(nodeStats.containsKey("node1")); + assertFalse(nodeStats.containsKey("node2")); + } +} diff --git a/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java b/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java new file mode 100644 index 0000000000000..374c993a264d4 --- /dev/null +++ b/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.concurrent.TimeUnit; + +/** + * Tests to validate AverageMemoryUsageTracker and AverageCpuUsageTracker implementation + */ +public class AverageUsageTrackerTests extends OpenSearchTestCase { + ThreadPool threadPool; + AverageMemoryUsageTracker averageMemoryUsageTracker; + AverageCpuUsageTracker averageCpuUsageTracker; + + @Before + public void setup() { + threadPool = new TestThreadPool(getClass().getName()); + averageMemoryUsageTracker = new AverageMemoryUsageTracker( + threadPool, + new TimeValue(500, TimeUnit.MILLISECONDS), + new TimeValue(1000, TimeUnit.MILLISECONDS) + ); + averageCpuUsageTracker = new AverageCpuUsageTracker( + threadPool, + new TimeValue(500, TimeUnit.MILLISECONDS), + new TimeValue(1000, TimeUnit.MILLISECONDS) + ); + } + + @After + public void cleanup() { + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + } + + public void testBasicUsage() { + + assertAverageUsageStats(averageMemoryUsageTracker); + assertAverageUsageStats(averageCpuUsageTracker); + } + + public void testUpdateWindowSize() { + assertUpdateWindowSize(averageMemoryUsageTracker); + assertUpdateWindowSize(averageCpuUsageTracker); + } + + private void assertAverageUsageStats(AbstractAverageUsageTracker usageTracker) { + usageTracker.recordUsage(1); + assertFalse(usageTracker.isReady()); + usageTracker.recordUsage(2); + assertTrue(usageTracker.isReady()); + assertEquals(2, usageTracker.getWindowSize()); + assertEquals(1.5, usageTracker.getAverage(), 0.0); + usageTracker.recordUsage(5); + // ( 2 + 5 ) / 2 = 3.5 + assertEquals(3.5, usageTracker.getAverage(), 0.0); + } + + private void assertUpdateWindowSize(AbstractAverageUsageTracker usageTracker) { + usageTracker.recordUsage(1); + usageTracker.recordUsage(2); + + assertEquals(2, usageTracker.getWindowSize()); + assertEquals(1.5, usageTracker.getAverage(), 0.0); + usageTracker.recordUsage(5); + // ( 2 + 5 ) / 2 = 3.5 + assertEquals(3.5, usageTracker.getAverage(), 0.0); + + usageTracker.setWindowSize(new TimeValue(2000, TimeUnit.MILLISECONDS)); + assertEquals(0, usageTracker.getWindowSize()); + assertEquals(0.0, usageTracker.getAverage(), 0.0); + // verify 2000/500 = 4 is the window size and average is calculated on window size of 4 + usageTracker.recordUsage(1); + usageTracker.recordUsage(2); + usageTracker.recordUsage(1); + assertFalse(usageTracker.isReady()); + usageTracker.recordUsage(2); + assertTrue(usageTracker.isReady()); + assertEquals(4, usageTracker.getWindowSize()); + // (1 + 2 + 1 + 2 ) / 4 = 1.5 + assertEquals(1.5, usageTracker.getAverage(), 0.0); + usageTracker.recordUsage(2); + assertTrue(usageTracker.isReady()); + // ( 2 + 1 + 2 + 2 ) / 4 = 1.75 + assertEquals(1.75, usageTracker.getAverage(), 0.0); + } +} diff --git a/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java b/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java new file mode 100644 index 0000000000000..1ce68b9f29062 --- /dev/null +++ b/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under 
the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.concurrent.TimeUnit; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.greaterThan; + +/** + * Tests to assert resource usage trackers retrieving resource utilization averages + */ +public class NodeResourceUsageTrackerTests extends OpenSearchSingleNodeTestCase { + ThreadPool threadPool; + + @Before + public void setup() { + threadPool = new TestThreadPool(getClass().getName()); + } + + @After + public void cleanup() { + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } + + public void testStats() throws Exception { + Settings settings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), new TimeValue(500, TimeUnit.MILLISECONDS)) + .build(); + NodeResourceUsageTracker tracker = new NodeResourceUsageTracker( + threadPool, + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + tracker.start(); + /** + * Asserting memory utilization to be greater than 0 + * cpu percent used is mostly 0, so skipping assertion for that + */ + assertBusy(() -> assertThat(tracker.getMemoryUtilizationPercent(), greaterThan(0.0)), 5, TimeUnit.SECONDS); + tracker.stop(); + tracker.close(); + } + + public void testUpdateSettings() { + NodeResourceUsageTracker tracker = new NodeResourceUsageTracker( + threadPool, + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + + assertEquals(tracker.getResourceTrackerSettings().getCpuWindowDuration().getSeconds(), 30); + assertEquals(tracker.getResourceTrackerSettings().getMemoryWindowDuration().getSeconds(), 30); + + Settings settings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), "10s") + .build(); + ClusterUpdateSettingsResponse response = client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get(); + assertEquals( + "10s", + response.getPersistentSettings().get(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey()) + ); + + Settings jvmsettings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), "5s") + .build(); + response = client().admin().cluster().prepareUpdateSettings().setPersistentSettings(jvmsettings).get(); + assertEquals( + "5s", + response.getPersistentSettings().get(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey()) + ); + } +} diff --git a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java index 6354cf18e8b62..a520b6278ea47 100644 --- 
a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java @@ -112,6 +112,7 @@ List<NodeStats> adjustNodesStats(List<NodeStats> nodesStats) { nodeStats.getDiscoveryStats(), nodeStats.getIngestStats(), nodeStats.getAdaptiveSelectionStats(), + nodeStats.getResourceUsageStats(), nodeStats.getScriptCacheStats(), nodeStats.getIndexingPressureStats(), nodeStats.getShardIndexingPressureStats(), diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index acdc5b94804c6..3c7423f73685f 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -2718,6 +2718,7 @@ public void ensureEstimatedStats() { false, false, false, + false, false ); assertThat( From dd6355910bb92af47f78970d50ca646a66b014be Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 10:15:31 -0400 Subject: [PATCH 018/178] Bump org.codehaus.woodstox:stax2-api from 4.2.1 to 4.2.2 in /plugins/repository-hdfs (#10639) * Bump org.codehaus.woodstox:stax2-api in /plugins/repository-hdfs Bumps [org.codehaus.woodstox:stax2-api](https://github.com/FasterXML/stax2-api) from 4.2.1 to 4.2.2. - [Commits](https://github.com/FasterXML/stax2-api/compare/stax2-api-4.2.1...stax2-api-4.2.2) --- updated-dependencies: - dependency-name: org.codehaus.woodstox:stax2-api dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 3 ++- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 | 1 - plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 2211f59574718..018ad6a676bcf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -94,6 +94,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) - Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) - Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) +- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) @@ -113,4 +114,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x +[Unreleased 2.x]:
https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 1ce29afe0e69a..ed1f54888a26f 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -84,7 +84,7 @@ dependencies { api 'net.minidev:json-smart:2.5.0' api "io.netty:netty-all:${versions.netty}" implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" - implementation 'org.codehaus.woodstox:stax2-api:4.2.1' + implementation 'org.codehaus.woodstox:stax2-api:4.2.2' hdfsFixture project(':test:fixtures:hdfs-fixture') // Set the keytab files in the classpath so that we can access them from test code without the security manager diff --git a/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 b/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 deleted file mode 100644 index 2c12704cdc560..0000000000000 --- a/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3f7325c52240418c2ba257b103c3c550e140c83 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 b/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 new file mode 100644 index 0000000000000..b15a7ead0d016 --- /dev/null +++ b/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 @@ -0,0 +1 @@ +b0d746cadea928e5264f2ea294ea9a1bf815bbde \ No newline at end of file From e8ba35cff433b3dc0d821d04326b2bf261beab06 Mon Sep 17 00:00:00 2001 From: Stephen Crawford <65832608+scrawfor99@users.noreply.github.com> Date: Mon, 16 Oct 2023 17:15:26 -0400 Subject: [PATCH 019/178] Remove the authenticate token method from TokenManager interface (#10614) Remove the authenticate token method from TokenManager interface Signed-off-by: Stephen Crawford Signed-off-by: Stephen Crawford <65832608+scrawfor99@users.noreply.github.com> --- .../identity/shiro/ShiroTokenManager.java | 16 ---------------- .../identity/shiro/AuthTokenHandlerTests.java | 5 ----- .../identity/noop/NoopTokenManager.java | 5 ----- .../opensearch/identity/tokens/TokenManager.java | 7 ------- 4 files changed, 33 deletions(-) diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java index ddfb99e626718..a14215aa7655b 100644 --- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java @@ -10,13 +10,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.shiro.SecurityUtils; import org.apache.shiro.authc.AuthenticationToken; import org.apache.shiro.authc.UsernamePasswordToken; import org.opensearch.common.Randomness; import org.opensearch.identity.IdentityService; import org.opensearch.identity.Subject; -import org.opensearch.identity.noop.NoopSubject; import org.opensearch.identity.tokens.AuthToken; import org.opensearch.identity.tokens.BasicAuthToken; import org.opensearch.identity.tokens.OnBehalfOfClaims; @@ -88,20 +86,6 @@ public AuthToken issueServiceAccountToken(String audience) { return token; } - @Override - public Subject authenticateToken(AuthToken authToken) { - return new NoopSubject(); - } - - public boolean validateToken(AuthToken token) { - if (token instanceof BasicAuthToken) { - final BasicAuthToken basicAuthToken = 
(BasicAuthToken) token; - return basicAuthToken.getUser().equals(SecurityUtils.getSubject().toString()) - && basicAuthToken.getPassword().equals(shiroTokenPasswordMap.get(basicAuthToken)); - } - return false; - } - public String getTokenInfo(AuthToken token) { if (token instanceof BasicAuthToken) { final BasicAuthToken basicAuthToken = (BasicAuthToken) token; diff --git a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java index db77ced298991..f99484083e2fb 100644 --- a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java +++ b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java @@ -100,11 +100,6 @@ public void testShouldFailGetTokenInfo() { assertThrows(UnsupportedAuthenticationToken.class, () -> shiroAuthTokenHandler.getTokenInfo(bearerAuthToken)); } - public void testShouldFailValidateToken() { - final BearerAuthToken bearerAuthToken = new BearerAuthToken("header.payload.signature"); - assertFalse(shiroAuthTokenHandler.validateToken(bearerAuthToken)); - } - public void testShoudPassMapLookupWithToken() { final BasicAuthToken authToken = new BasicAuthToken("Basic dGVzdDp0ZTpzdA=="); shiroAuthTokenHandler.getShiroTokenPasswordMap().put(authToken, "te:st"); diff --git a/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java b/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java index 1dc3a58916b5c..fa6643b7447dc 100644 --- a/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java +++ b/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java @@ -50,9 +50,4 @@ public String asAuthHeaderValue() { } }; } - - @Override - public Subject authenticateToken(AuthToken authToken) { - return null; - } } diff --git a/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java b/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java index b4048251a06a2..972a9a1080955 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java +++ b/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java @@ -30,11 +30,4 @@ public interface TokenManager { * @return a new auth token */ public AuthToken issueServiceAccountToken(final String audience); - - /** - * Authenticates a provided authToken - * @param authToken: The authToken to authenticate - * @return The authenticated subject - */ - public Subject authenticateToken(AuthToken authToken); } From a8574cd69757171732b43bb2ec91390be4324b22 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 18:11:45 -0400 Subject: [PATCH 020/178] Bump com.google.http-client:google-http-client from 1.43.2 to 1.43.3 in /plugins/repository-gcs (#10635) * Bump com.google.http-client:google-http-client Bumps [com.google.http-client:google-http-client](https://github.com/googleapis/google-http-java-client) from 1.43.2 to 1.43.3. - [Release notes](https://github.com/googleapis/google-http-java-client/releases) - [Changelog](https://github.com/googleapis/google-http-java-client/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/google-http-java-client/compare/v1.43.2...v1.43.3) --- updated-dependencies: - dependency-name: com.google.http-client:google-http-client dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-gcs/build.gradle | 4 ++-- .../licenses/google-http-client-1.43.2.jar.sha1 | 1 - .../licenses/google-http-client-1.43.3.jar.sha1 | 1 + .../licenses/google-http-client-appengine-1.43.2.jar.sha1 | 1 - .../licenses/google-http-client-appengine-1.43.3.jar.sha1 | 1 + 6 files changed, 5 insertions(+), 4 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/google-http-client-1.43.2.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/google-http-client-appengine-1.43.2.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 018ad6a676bcf..4dd19586ba263 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -95,6 +95,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) - Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) - Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) +- Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index da4978608a12f..1bef5146f1db9 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -75,8 +75,8 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" api 'com.google.guava:failureaccess:1.0.1' - api 'com.google.http-client:google-http-client:1.43.2' - api 'com.google.http-client:google-http-client-appengine:1.43.2' + api 'com.google.http-client:google-http-client:1.43.3' + api 'com.google.http-client:google-http-client-appengine:1.43.3' api 'com.google.http-client:google-http-client-gson:1.43.3' api 'com.google.http-client:google-http-client-jackson2:1.43.3' diff --git a/plugins/repository-gcs/licenses/google-http-client-1.43.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.43.2.jar.sha1 deleted file mode 100644 index a576a74c62542..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-1.43.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2520469ebd8c0675f0d2aeafd2da665228320fcf \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 new file mode 100644 index 0000000000000..800467de8bdf3 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 @@ -0,0 +1 @@ +a758b82e55a2f5f681e289c5ed384d3dbda6f3cd \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.2.jar.sha1 deleted file mode 100644 
index d8a9dba20070b..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9fb548c5264227813fd83991b94a705b0841c15f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 new file mode 100644 index 0000000000000..4adcca6a55902 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 @@ -0,0 +1 @@ +09d6cbdde6ea3469a67601a811b4e83de3e68a79 \ No newline at end of file From ec0cbb3a6efb9cccb02ebc020e45260c61a65244 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 16 Oct 2023 18:25:30 -0400 Subject: [PATCH 021/178] [BUG] Fix java.lang.SecurityException in repository-gcs plugin (#10642) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../repositories/gcs/GoogleCloudStorageService.java | 2 +- .../opensearch/repositories/gcs/SocketAccess.java | 13 +++++++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4dd19586ba263..33c6f00fdd9ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -102,6 +102,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) - Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) - [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) +- [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642)) ### Deprecated diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java index 445e1d65f3d3e..c9ebb3acaf3e5 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java @@ -228,7 +228,7 @@ StorageOptions createStorageOptions( } storageOptionsBuilder.setCredentials(serviceAccountCredentials); } - return storageOptionsBuilder.build(); + return SocketAccess.doPrivilegedException(() -> storageOptionsBuilder.build()); } /** diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java index 197e772df30d5..35127d6ea4060 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java @@ -32,6 +32,7 @@ package org.opensearch.repositories.gcs; +import org.apache.logging.log4j.core.util.Throwables; import org.opensearch.SpecialPermission; import org.opensearch.common.CheckedRunnable; @@ -71,4 +72,16 @@ public static void doPrivilegedVoidIOException(CheckedRunnable<IOException> acti throw (IOException) e.getCause(); } } + + public static <T> T doPrivilegedException(PrivilegedExceptionAction<T> operation) { + SpecialPermission.check(); + try { + return
AccessController.doPrivileged(operation); + } catch (PrivilegedActionException e) { + Throwables.rethrow(e.getCause()); + assert false : "always throws"; + return null; + } + } + } From 4112f8d1a5594b88cf267b990a954711670a34cf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 19:25:28 -0400 Subject: [PATCH 022/178] Bump com.squareup.okio:okio from 3.5.0 to 3.6.0 in /test/fixtures/hdfs-fixture (#10637) * Bump com.squareup.okio:okio in /test/fixtures/hdfs-fixture Bumps [com.squareup.okio:okio](https://github.com/square/okio) from 3.5.0 to 3.6.0. - [Changelog](https://github.com/square/okio/blob/master/CHANGELOG.md) - [Commits](https://github.com/square/okio/compare/parent-3.5.0...parent-3.6.0) --- updated-dependencies: - dependency-name: com.squareup.okio:okio dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 33c6f00fdd9ce..46b3fc7852ebc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -96,6 +96,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) - Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) - Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635)) +- Bump `com.squareup.okio:okio` from 3.5.0 to 3.6.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 7de98c0986b87..13345fcb20de1 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -71,6 +71,6 @@ dependencies { runtimeOnly("com.squareup.okhttp3:okhttp:4.11.0") { exclude group: "com.squareup.okio" } - runtimeOnly "com.squareup.okio:okio:3.5.0" + runtimeOnly "com.squareup.okio:okio:3.6.0" runtimeOnly "org.xerial.snappy:snappy-java:1.1.10.5" } From 8bc6791f6a3bd58b4ee25bf5ae61504570c8d424 Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Tue, 17 Oct 2023 10:57:14 +0530 Subject: [PATCH 023/178] Enhancing FS stats to include read/write time, queue size and IO time (#10541) * Enhancing FS stats to include read / write time, io time and queue size Signed-off-by: Bharathwaj G --- CHANGELOG.md | 1 + .../org/opensearch/monitor/fs/FsInfo.java | 178 +++++++++++++++++- .../org/opensearch/monitor/fs/FsProbe.java | 29 +++ .../cluster/node/stats/NodeStatsTests.java | 12 ++ .../monitor/fs/DeviceStatsTests.java | 19 +- .../opensearch/monitor/fs/FsProbeTests.java | 51 +++++ 6 files changed, 285 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 46b3fc7852ebc..a02b5f4e7242e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ The format is based on [Keep a 
Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) +- [Admission control] Add enhancements to FS stats to include read/write time, queue size and IO time ([#10541](https://github.com/opensearch-project/OpenSearch/pull/10541)) - [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) ### Dependencies diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java index 114702ff0d351..4e2e9f280d765 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java @@ -235,6 +235,14 @@ public static class DeviceStats implements Writeable, ToXContentFragment { final long previousWritesCompleted; final long currentSectorsWritten; final long previousSectorsWritten; + final long currentReadTime; + final long previousReadTime; + final long currentWriteTime; + final long previousWriteTime; + final long currentQueueSize; + final long previousQueueSize; + final long currentIOTime; + final long previousIOTime; public DeviceStats( final int majorDeviceNumber, @@ -244,6 +252,10 @@ public DeviceStats( final long currentSectorsRead, final long currentWritesCompleted, final long currentSectorsWritten, + final long currentReadTime, + final long currentWriteTime, + final long currentQueueSize, + final long currentIOTime, final DeviceStats previousDeviceStats ) { this( @@ -257,7 +269,15 @@ public DeviceStats( currentSectorsRead, previousDeviceStats != null ? previousDeviceStats.currentSectorsRead : -1, currentWritesCompleted, - previousDeviceStats != null ? previousDeviceStats.currentWritesCompleted : -1 + previousDeviceStats != null ? previousDeviceStats.currentWritesCompleted : -1, + currentReadTime, + previousDeviceStats != null ? previousDeviceStats.currentReadTime : -1, + currentWriteTime, + previousDeviceStats != null ? previousDeviceStats.currentWriteTime : -1, + currentQueueSize, + previousDeviceStats != null ? previousDeviceStats.currentQueueSize : -1, + currentIOTime, + previousDeviceStats != null ?
previousDeviceStats.currentIOTime : -1 ); } @@ -272,7 +292,15 @@ private DeviceStats( final long currentSectorsRead, final long previousSectorsRead, final long currentWritesCompleted, - final long previousWritesCompleted + final long previousWritesCompleted, + final long currentReadTime, + final long previousReadTime, + final long currentWriteTime, + final long previousWriteTime, + final long currentQueueSize, + final long previousQueueSize, + final long currentIOTime, + final long previousIOTime ) { this.majorDeviceNumber = majorDeviceNumber; this.minorDeviceNumber = minorDeviceNumber; @@ -285,6 +313,14 @@ private DeviceStats( this.previousSectorsRead = previousSectorsRead; this.currentSectorsWritten = currentSectorsWritten; this.previousSectorsWritten = previousSectorsWritten; + this.currentReadTime = currentReadTime; + this.previousReadTime = previousReadTime; + this.currentWriteTime = currentWriteTime; + this.previousWriteTime = previousWriteTime; + this.currentQueueSize = currentQueueSize; + this.previousQueueSize = previousQueueSize; + this.currentIOTime = currentIOTime; + this.previousIOTime = previousIOTime; } public DeviceStats(StreamInput in) throws IOException { @@ -299,6 +335,25 @@ public DeviceStats(StreamInput in) throws IOException { previousSectorsRead = in.readLong(); currentSectorsWritten = in.readLong(); previousSectorsWritten = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + currentReadTime = in.readLong(); + previousReadTime = in.readLong(); + currentWriteTime = in.readLong(); + previousWriteTime = in.readLong(); + currentQueueSize = in.readLong(); + previousQueueSize = in.readLong(); + currentIOTime = in.readLong(); + previousIOTime = in.readLong(); + } else { + currentReadTime = 0; + previousReadTime = 0; + currentWriteTime = 0; + previousWriteTime = 0; + currentQueueSize = 0; + previousQueueSize = 0; + currentIOTime = 0; + previousIOTime = 0; + } } @Override @@ -314,6 +369,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(previousSectorsRead); out.writeLong(currentSectorsWritten); out.writeLong(previousSectorsWritten); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeLong(currentReadTime); + out.writeLong(previousReadTime); + out.writeLong(currentWriteTime); + out.writeLong(previousWriteTime); + out.writeLong(currentQueueSize); + out.writeLong(previousQueueSize); + out.writeLong(currentIOTime); + out.writeLong(previousIOTime); + } } public long operations() { @@ -346,6 +411,39 @@ public long writeKilobytes() { return (currentSectorsWritten - previousSectorsWritten) / 2; } + /** + * Total time taken for all read operations + */ + public long readTime() { + if (previousReadTime == -1) return -1; + return currentReadTime - previousReadTime; + } + + /** + * Total time taken for all write operations + */ + public long writeTime() { + if (previousWriteTime == -1) return -1; + return currentWriteTime - previousWriteTime; + } + + /** + * Queue size based on weighted time spent doing I/Os + */ + public long queueSize() { + if (previousQueueSize == -1) return -1; + return currentQueueSize - previousQueueSize; + } + + /** + * Total time spent doing I/Os + */ + public long ioTimeInMillis() { + if (previousIOTime == -1) return -1; + + return (currentIOTime - previousIOTime); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field("device_name", deviceName); @@ -354,9 +452,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params 
params) throws builder.field(IoStats.WRITE_OPERATIONS, writeOperations()); builder.field(IoStats.READ_KILOBYTES, readKilobytes()); builder.field(IoStats.WRITE_KILOBYTES, writeKilobytes()); + builder.field(IoStats.READ_TIME, readTime()); + builder.field(IoStats.WRITE_TIME, writeTime()); + builder.field(IoStats.QUEUE_SIZE, queueSize()); + builder.field(IoStats.IO_TIME_MS, ioTimeInMillis()); return builder; } - } /** @@ -371,6 +472,10 @@ public static class IoStats implements Writeable, ToXContentFragment { private static final String WRITE_OPERATIONS = "write_operations"; private static final String READ_KILOBYTES = "read_kilobytes"; private static final String WRITE_KILOBYTES = "write_kilobytes"; + private static final String READ_TIME = "read_time"; + private static final String WRITE_TIME = "write_time"; + private static final String QUEUE_SIZE = "queue_size"; + private static final String IO_TIME_MS = "io_time_in_millis"; final DeviceStats[] devicesStats; final long totalOperations; @@ -378,6 +483,10 @@ public static class IoStats implements Writeable, ToXContentFragment { final long totalWriteOperations; final long totalReadKilobytes; final long totalWriteKilobytes; + final long totalReadTime; + final long totalWriteTime; + final long totalQueueSize; + final long totalIOTimeInMillis; public IoStats(final DeviceStats[] devicesStats) { this.devicesStats = devicesStats; @@ -386,18 +495,30 @@ public IoStats(final DeviceStats[] devicesStats) { long totalWriteOperations = 0; long totalReadKilobytes = 0; long totalWriteKilobytes = 0; + long totalReadTime = 0; + long totalWriteTime = 0; + long totalQueueSize = 0; + long totalIOTimeInMillis = 0; for (DeviceStats deviceStats : devicesStats) { totalOperations += deviceStats.operations() != -1 ? deviceStats.operations() : 0; totalReadOperations += deviceStats.readOperations() != -1 ? deviceStats.readOperations() : 0; totalWriteOperations += deviceStats.writeOperations() != -1 ? deviceStats.writeOperations() : 0; totalReadKilobytes += deviceStats.readKilobytes() != -1 ? deviceStats.readKilobytes() : 0; totalWriteKilobytes += deviceStats.writeKilobytes() != -1 ? deviceStats.writeKilobytes() : 0; + totalReadTime += deviceStats.readTime() != -1 ? deviceStats.readTime() : 0; + totalWriteTime += deviceStats.writeTime() != -1 ? deviceStats.writeTime() : 0; + totalQueueSize += deviceStats.queueSize() != -1 ? deviceStats.queueSize() : 0; + totalIOTimeInMillis += deviceStats.ioTimeInMillis() != -1 ? 
deviceStats.ioTimeInMillis() : 0; } this.totalOperations = totalOperations; this.totalReadOperations = totalReadOperations; this.totalWriteOperations = totalWriteOperations; this.totalReadKilobytes = totalReadKilobytes; this.totalWriteKilobytes = totalWriteKilobytes; + this.totalReadTime = totalReadTime; + this.totalWriteTime = totalWriteTime; + this.totalQueueSize = totalQueueSize; + this.totalIOTimeInMillis = totalIOTimeInMillis; } public IoStats(StreamInput in) throws IOException { @@ -412,6 +533,17 @@ public IoStats(StreamInput in) throws IOException { this.totalWriteOperations = in.readLong(); this.totalReadKilobytes = in.readLong(); this.totalWriteKilobytes = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + this.totalReadTime = in.readLong(); + this.totalWriteTime = in.readLong(); + this.totalQueueSize = in.readLong(); + this.totalIOTimeInMillis = in.readLong(); + } else { + this.totalReadTime = 0; + this.totalWriteTime = 0; + this.totalQueueSize = 0; + this.totalIOTimeInMillis = 0; + } } @Override @@ -425,6 +557,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(totalWriteOperations); out.writeLong(totalReadKilobytes); out.writeLong(totalWriteKilobytes); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeLong(totalReadTime); + out.writeLong(totalWriteTime); + out.writeLong(totalQueueSize); + out.writeLong(totalIOTimeInMillis); + } } public DeviceStats[] getDevicesStats() { @@ -451,6 +589,34 @@ public long getTotalWriteKilobytes() { return totalWriteKilobytes; } + /** + * Sum of read time across all devices + */ + public long getTotalReadTime() { + return totalReadTime; + } + + /** + * Sum of write time across all devices + */ + public long getTotalWriteTime() { + return totalWriteTime; + } + + /** + * Sum of queue size across all devices + */ + public long getTotalQueueSize() { + return totalQueueSize; + } + + /** + * Sum of IO time across all devices + */ + public long getTotalIOTimeMillis() { + return totalIOTimeInMillis; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (devicesStats.length > 0) { @@ -468,11 +634,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(WRITE_OPERATIONS, totalWriteOperations); builder.field(READ_KILOBYTES, totalReadKilobytes); builder.field(WRITE_KILOBYTES, totalWriteKilobytes); + + builder.field(READ_TIME, totalReadTime); + builder.field(WRITE_TIME, totalWriteTime); + builder.field(QUEUE_SIZE, totalQueueSize); + builder.field(IO_TIME_MS, totalIOTimeInMillis); builder.endObject(); } return builder; } - } private final long timestamp; diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java index e20d84cd9763e..f4731a4a34373 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java @@ -109,6 +109,25 @@ final FsInfo.IoStats ioStats(final Set<Tuple<Integer, Integer>> devicesNumbers, List<FsInfo.DeviceStats> devicesStats = new ArrayList<>(); + /** + * The /proc/diskstats file displays the I/O statistics of block devices.
+ * Each line contains the following 14 fields: ( + additional fields ) + * + * 1 major number + * 2 minor number + * 3 device name + * 4 reads completed successfully + * 5 reads merged + * 6 sectors read + * 7 time spent reading (ms) + * 8 writes completed + * 9 writes merged + * 10 sectors written + * 11 time spent writing (ms) + * 12 I/Os currently in progress + * 13 time spent doing I/Os (ms) ---- IO use percent + * 14 weighted time spent doing I/Os (ms) ---- Queue size + */ List<String> lines = readProcDiskStats(); if (!lines.isEmpty()) { for (String line : lines) { @@ -123,6 +142,12 @@ final FsInfo.IoStats ioStats(final Set<Tuple<Integer, Integer>> devicesNumbers, final long sectorsRead = Long.parseLong(fields[5]); final long writesCompleted = Long.parseLong(fields[7]); final long sectorsWritten = Long.parseLong(fields[9]); + // readTime and writeTime calculates the total read/write time taken for each request to complete + // ioTime calculates actual time queue and disks are busy + final long readTime = Long.parseLong(fields[6]); + final long writeTime = Long.parseLong(fields[10]); + final long ioTime = fields.length > 12 ? Long.parseLong(fields[12]) : 0; + final long queueSize = fields.length > 13 ? Long.parseLong(fields[13]) : 0; final FsInfo.DeviceStats deviceStats = new FsInfo.DeviceStats( majorDeviceNumber, minorDeviceNumber, deviceName, readsCompleted, sectorsRead, writesCompleted, sectorsWritten, readTime, writeTime, queueSize, ioTime, deviceMap.get(Tuple.tuple(majorDeviceNumber, minorDeviceNumber)) ); devicesStats.add(deviceStats); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index 7a1b6f113d0e8..4b865383ee007 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -284,6 +284,10 @@ public void testSerialization() throws IOException { assertEquals(ioStats.getTotalReadOperations(), deserializedIoStats.getTotalReadOperations()); assertEquals(ioStats.getTotalWriteKilobytes(), deserializedIoStats.getTotalWriteKilobytes()); assertEquals(ioStats.getTotalWriteOperations(), deserializedIoStats.getTotalWriteOperations()); + assertEquals(ioStats.getTotalReadTime(), deserializedIoStats.getTotalReadTime()); + assertEquals(ioStats.getTotalWriteTime(), deserializedIoStats.getTotalWriteTime()); + assertEquals(ioStats.getTotalQueueSize(), deserializedIoStats.getTotalQueueSize()); + assertEquals(ioStats.getTotalIOTimeMillis(), deserializedIoStats.getTotalIOTimeMillis()); assertEquals(ioStats.getDevicesStats().length, deserializedIoStats.getDevicesStats().length); for (int i = 0; i < ioStats.getDevicesStats().length; i++) { FsInfo.DeviceStats deviceStats = ioStats.getDevicesStats()[i]; @@ -645,6 +649,10 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), null ); deviceStatsArray[i] = new FsInfo.DeviceStats( @@ -655,6 +663,10 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(),
previousDeviceStats ); } diff --git a/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java b/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java index 0fd039b84e887..0059f8e215f2e 100644 --- a/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java +++ b/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java @@ -46,7 +46,10 @@ public void testDeviceStats() { final int sectorsRead = randomIntBetween(8 * readsCompleted, 16 * readsCompleted); final int writesCompleted = randomIntBetween(1, 1 << 16); final int sectorsWritten = randomIntBetween(8 * writesCompleted, 16 * writesCompleted); - + final int readTime = randomIntBetween(1, 1 << 16); + final int writeTime = randomIntBetween(1, 1 << 16); + final int queueSize = randomIntBetween(1, 1 << 16); + final int ioTime = randomIntBetween(1, 1 << 16); FsInfo.DeviceStats previous = new FsInfo.DeviceStats( majorDeviceNumber, minorDeviceNumber, @@ -55,6 +58,10 @@ public void testDeviceStats() { sectorsRead, writesCompleted, sectorsWritten, + readTime, + writeTime, + queueSize, + ioTime, null ); FsInfo.DeviceStats current = new FsInfo.DeviceStats( @@ -65,6 +72,10 @@ public void testDeviceStats() { sectorsRead + 16384, writesCompleted + 2048, sectorsWritten + 32768, + readTime + 500, + writeTime + 100, + queueSize + 20, + ioTime + 8192, previous ); assertThat(current.operations(), equalTo(1024L + 2048L)); @@ -72,6 +83,10 @@ public void testDeviceStats() { assertThat(current.writeOperations(), equalTo(2048L)); assertThat(current.readKilobytes(), equalTo(16384L / 2)); assertThat(current.writeKilobytes(), equalTo(32768L / 2)); + assertEquals(500, current.readTime()); + assertEquals(100, current.writeTime()); + assertEquals(20, current.queueSize()); + assertEquals(8192, current.ioTimeInMillis()); } } diff --git a/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java b/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java index 686a624d988d7..59a888c665be7 100644 --- a/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java +++ b/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java @@ -91,6 +91,14 @@ public void testFsInfo() throws IOException { assertThat(deviceStats.previousWritesCompleted, equalTo(-1L)); assertThat(deviceStats.currentSectorsWritten, greaterThanOrEqualTo(0L)); assertThat(deviceStats.previousSectorsWritten, equalTo(-1L)); + assertThat(deviceStats.currentReadTime, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousReadTime, greaterThanOrEqualTo(-1L)); + assertThat(deviceStats.currentWriteTime, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousWriteTime, greaterThanOrEqualTo(-1L)); + assertThat(deviceStats.currentQueueSize, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousQueueSize, greaterThanOrEqualTo(-1L)); + assertThat(deviceStats.currentIOTime, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousIOTime, greaterThanOrEqualTo(-1L)); } } else { assertNull(stats.getIoStats()); @@ -243,6 +251,16 @@ List<String> readProcDiskStats() throws IOException { assertThat(first.devicesStats[0].previousWritesCompleted, equalTo(-1L)); assertThat(first.devicesStats[0].currentSectorsWritten, equalTo(118857776L)); assertThat(first.devicesStats[0].previousSectorsWritten, equalTo(-1L)); + + assertEquals(33457, first.devicesStats[0].currentReadTime); + assertEquals(-1, first.devicesStats[0].previousReadTime); + assertEquals(18730966, first.devicesStats[0].currentWriteTime); + assertEquals(-1,
first.devicesStats[0].previousWriteTime); + assertEquals(18767169, first.devicesStats[0].currentQueueSize); + assertEquals(-1, first.devicesStats[0].previousQueueSize); + assertEquals(1918440, first.devicesStats[0].currentIOTime); + assertEquals(-1, first.devicesStats[0].previousIOTime); + assertThat(first.devicesStats[1].majorDeviceNumber, equalTo(253)); assertThat(first.devicesStats[1].minorDeviceNumber, equalTo(2)); assertThat(first.devicesStats[1].deviceName, equalTo("dm-2")); @@ -255,6 +273,15 @@ List readProcDiskStats() throws IOException { assertThat(first.devicesStats[1].currentSectorsWritten, equalTo(64126096L)); assertThat(first.devicesStats[1].previousSectorsWritten, equalTo(-1L)); + assertEquals(49312, first.devicesStats[1].currentReadTime); + assertEquals(-1, first.devicesStats[1].previousReadTime); + assertEquals(33730596, first.devicesStats[1].currentWriteTime); + assertEquals(-1, first.devicesStats[1].previousWriteTime); + assertEquals(33781827, first.devicesStats[1].currentQueueSize); + assertEquals(-1, first.devicesStats[1].previousQueueSize); + assertEquals(1058193, first.devicesStats[1].currentIOTime); + assertEquals(-1, first.devicesStats[1].previousIOTime); + diskStats.set( Arrays.asList( " 259 0 nvme0n1 336870 0 7928397 82876 10264393 0 182986405 52451610 0 2971042 52536492", @@ -281,6 +308,16 @@ List readProcDiskStats() throws IOException { assertThat(second.devicesStats[0].previousWritesCompleted, equalTo(8398869L)); assertThat(second.devicesStats[0].currentSectorsWritten, equalTo(118857776L)); assertThat(second.devicesStats[0].previousSectorsWritten, equalTo(118857776L)); + + assertEquals(33464, second.devicesStats[0].currentReadTime); + assertEquals(33457, second.devicesStats[0].previousReadTime); + assertEquals(18730966, second.devicesStats[0].currentWriteTime); + assertEquals(18730966, second.devicesStats[0].previousWriteTime); + assertEquals(18767176, second.devicesStats[0].currentQueueSize); + assertEquals(18767169, second.devicesStats[0].previousQueueSize); + assertEquals(1918444, second.devicesStats[0].currentIOTime); + assertEquals(1918440, second.devicesStats[0].previousIOTime); + assertThat(second.devicesStats[1].majorDeviceNumber, equalTo(253)); assertThat(second.devicesStats[1].minorDeviceNumber, equalTo(2)); assertThat(second.devicesStats[1].deviceName, equalTo("dm-2")); @@ -293,11 +330,25 @@ List readProcDiskStats() throws IOException { assertThat(second.devicesStats[1].currentSectorsWritten, equalTo(64128568L)); assertThat(second.devicesStats[1].previousSectorsWritten, equalTo(64126096L)); + assertEquals(49369, second.devicesStats[1].currentReadTime); + assertEquals(49312, second.devicesStats[1].previousReadTime); + assertEquals(33730766, second.devicesStats[1].currentWriteTime); + assertEquals(33730596, second.devicesStats[1].previousWriteTime); + assertEquals(33781827, first.devicesStats[1].currentQueueSize); + assertEquals(-1L, first.devicesStats[1].previousQueueSize); + assertEquals(1058193, first.devicesStats[1].currentIOTime); + assertEquals(-1L, first.devicesStats[1].previousIOTime); + assertThat(second.totalOperations, equalTo(575L)); assertThat(second.totalReadOperations, equalTo(261L)); assertThat(second.totalWriteOperations, equalTo(314L)); assertThat(second.totalReadKilobytes, equalTo(2392L)); assertThat(second.totalWriteKilobytes, equalTo(1236L)); + + assertEquals(64, second.totalReadTime); + assertEquals(170, second.totalWriteTime); + assertEquals(236, second.totalQueueSize); + assertEquals(158, second.totalIOTimeInMillis); } public 
void testAdjustForHugeFilesystems() throws Exception { From d46a012c9fb01f7b3fb58a1c8fff1e352fe9efb8 Mon Sep 17 00:00:00 2001 From: David Z <38449481+dzane17@users.noreply.github.com> Date: Tue, 17 Oct 2023 08:29:30 -0700 Subject: [PATCH 024/178] Fix flaky testEqualsAndHashcode in SearchRequestTests (#10649) Signed-off-by: David Zane --- .../java/org/opensearch/action/search/SearchRequestTests.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java index cdd0ea863ce37..f025e3a63b9bf 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java @@ -244,7 +244,9 @@ private SearchRequest mutate(SearchRequest searchRequest) { ); mutators.add(() -> mutation.source(randomValueOtherThan(searchRequest.source(), this::createSearchSourceBuilder))); mutators.add(() -> mutation.setCcsMinimizeRoundtrips(searchRequest.isCcsMinimizeRoundtrips() == false)); - mutators.add(() -> mutation.setPhaseTook(searchRequest.isPhaseTook() == false)); + mutators.add( + () -> mutation.setPhaseTook(searchRequest.isPhaseTook() == null ? randomBoolean() : searchRequest.isPhaseTook() == false) + ); mutators.add( () -> mutation.setCancelAfterTimeInterval( searchRequest.getCancelAfterTimeInterval() != null From 7b62e2f69ba766991bde166ed02dc373e6e39fa6 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Wed, 18 Oct 2023 02:18:57 +0800 Subject: [PATCH 025/178] Fix dissect ingest processor parsing empty brackets failed (#9255) * Fix dissect ingest processor parsing empty brackets failed Signed-off-by: Gao Binlong * Modify change log Signed-off-by: Gao Binlong * Modify change log Signed-off-by: Gao Binlong * Add assertion Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong Signed-off-by: Daniel (dB.) Doubrovkine Co-authored-by: Daniel (dB.) 
Doubrovkine --- CHANGELOG.md | 1 + .../org/opensearch/dissect/DissectParser.java | 11 +++++- .../ingest/common/DissectProcessorTests.java | 24 +++++++++++++ .../test/ingest/200_dissect_processor.yml | 35 +++++++++++++++++++ 4 files changed, 70 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a02b5f4e7242e..284244de08829 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -111,6 +111,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Removed ### Fixed +- Fix failure in dissect ingest processor parsing empty brackets ([#9255](https://github.com/opensearch-project/OpenSearch/pull/9255)) - Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) - Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) - Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) diff --git a/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java b/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java index b6dc0ceb1028f..828d4b7de450e 100644 --- a/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java +++ b/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java @@ -231,7 +231,10 @@ public Map parse(String inputString) { int lookAheadMatches; // start walking the input string byte by byte, look ahead for matches where needed // if a match is found jump forward to the end of the match - for (; i < input.length; i++) { + while (i < input.length) { + // record the value of i at the start of this iteration so we can assert the loop advances + int start = i; + lookAheadMatches = 0; // potential match between delimiter and input string if (delimiter.length > 0 && input[i] == delimiter[0]) { @@ -283,8 +286,14 @@ public Map parse(String inputString) { delimiter = dissectPair.getDelimiter().getBytes(StandardCharsets.UTF_8); // i is always one byte after the last found delimiter, aka the start of the next value valueStart = i; + } else { + i++; } + } else { + i++; } + // i must have advanced, otherwise the loop would never terminate + assert (i != start); } // the last key, grab the rest of the input (unless consecutive delimiters already grabbed the last key) // and there is no trailing delimiter diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java index ca0c0df40f009..e42a1147825d1 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java @@ -155,4 +155,28 @@ public void testNullValueWithOutIgnoreMissing() { IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); } + + public void testMatchEmptyBrackets() { + IngestDocument ingestDocument = new IngestDocument( + "_index", + "_id", + null, + null, + null, + Collections.singletonMap("message", "[foo],[bar],[]") + ); + DissectProcessor dissectProcessor = new DissectProcessor("", null, "message", "[%{a}],[%{b}],[%{c}]", "", true); + dissectProcessor.execute(ingestDocument); + assertEquals("foo", ingestDocument.getFieldValue("a",
String.class)); + assertEquals("bar", ingestDocument.getFieldValue("b", String.class)); + assertEquals("", ingestDocument.getFieldValue("c", String.class)); + + ingestDocument = new IngestDocument("_index", "_id", null, null, null, Collections.singletonMap("message", "{}{}{}{baz}")); + dissectProcessor = new DissectProcessor("", null, "message", "{%{a}}{%{b}}{%{c}}{%{d}}", "", true); + dissectProcessor.execute(ingestDocument); + assertEquals("", ingestDocument.getFieldValue("a", String.class)); + assertEquals("", ingestDocument.getFieldValue("b", String.class)); + assertEquals("", ingestDocument.getFieldValue("c", String.class)); + assertEquals("baz", ingestDocument.getFieldValue("d", String.class)); + } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml index 916a7fe656cc2..d90e5fbf2362b 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml @@ -84,3 +84,38 @@ teardown: } ] } + +--- +"Test dissect processor can match empty brackets": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "dissect" : { + "field" : "message", + "pattern" : "[%{a}][%{b}][%{c}]" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: {message: "[foo][bar][]"} + + - do: + get: + index: test + id: 1 + - match: { _source.message: "[foo][bar][]" } + - match: { _source.a: "foo" } + - match: { _source.b: "bar" } + - match: { _source.c: "" } From a36ab39a3e7da385ccd3651313b1e953a87fae7a Mon Sep 17 00:00:00 2001 From: Gagan Juneja Date: Wed, 18 Oct 2023 03:42:59 +0530 Subject: [PATCH 026/178] Add telemetry tracer/metric enable flag and integ test (#10395) * Add telemetry tracer/metric enable flag and integ test Signed-off-by: Gagan Juneja * Add Changelog Signed-off-by: Gagan Juneja * Fix compilation issue Signed-off-by: Gagan Juneja * Empty-Commit Signed-off-by: Gagan Juneja * Add component flag to traceable wrappers Signed-off-by: Gagan Juneja * Address review comment Signed-off-by: Gagan Juneja * Address review comment Signed-off-by: Gagan Juneja * Address review comment Signed-off-by: Gagan Juneja * Address review comment Signed-off-by: Gagan Juneja * Address review comment Signed-off-by: Gagan Juneja * Address review comment Signed-off-by: Gagan Juneja --------- Signed-off-by: Gagan Juneja Signed-off-by: Gagan Juneja Co-authored-by: Gagan Juneja --- CHANGELOG.md | 1 + .../telemetry/metrics/MetricsTelemetry.java | 4 +- .../telemetry/tracing/DefaultTracer.java | 5 + .../opensearch/telemetry/tracing/Tracer.java | 6 ++ .../telemetry/tracing/noop/NoopTracer.java | 5 + .../telemetry/tracing/DefaultTracerTests.java | 1 + .../IntegrationTestOTelTelemetryPlugin.java | 5 +- .../InMemorySingletonMetricsExporter.java | 65 ++++++++++++ .../TelemetryMetricsDisabledSanityIT.java | 62 ++++++++++++ .../TelemetryMetricsEnabledSanityIT.java | 99 +++++++++++++++++++ .../TelemetryTracerDisabledSanityIT.java | 1 + .../TelemetryTracerEnabledSanityIT.java | 5 +- .../telemetry/OTelTelemetryPlugin.java | 27 +++-- .../metrics/OTelMetricsTelemetry.java | 9 +- .../telemetry/tracing/OTelTelemetry.java | 23 +++-- .../tracing/OTelTracingTelemetry.java | 15 +-- 
.../metrics/OTelMetricsTelemetryTests.java | 20 +++- .../tracing/OTelTracingTelemetryTests.java | 21 +++- .../common/settings/ClusterSettings.java | 8 +- .../main/java/org/opensearch/node/Node.java | 21 +++- .../telemetry/TelemetrySettings.java | 27 +++++ .../telemetry/tracing/WrappedTracer.java | 5 + .../channels/TraceableHttpChannel.java | 3 +- .../channels/TraceableRestChannel.java | 3 +- .../TraceableTcpTransportChannel.java | 3 +- .../TraceableTransportResponseHandler.java | 3 +- .../listener/TraceableActionListener.java | 3 +- .../metrics/MetricsRegistryFactoryTests.java | 12 +++ .../telemetry/tracing/TracerFactoryTests.java | 12 +++ .../telemetry/tracing/WrappedTracerTests.java | 5 +- .../test/OpenSearchIntegTestCase.java | 1 + .../test/OpenSearchSingleNodeTestCase.java | 2 + .../test/telemetry/MockTelemetry.java | 5 +- 33 files changed, 429 insertions(+), 58 deletions(-) rename plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/{tracing => }/IntegrationTestOTelTelemetryPlugin.java (85%) create mode 100644 plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java create mode 100644 plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java create mode 100644 plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 284244de08829..e379002d254ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -105,6 +105,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) - [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) - [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642)) +- Add telemetry tracer/metric enable flag and integ test. 
([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395)) ### Deprecated diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java index 2f70c28efb1cd..fb3dec8152b4f 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java @@ -10,14 +10,12 @@ import org.opensearch.common.annotation.ExperimentalApi; -import java.io.Closeable; - /** * Interface for metrics telemetry providers * * @opensearch.experimental */ @ExperimentalApi -public interface MetricsTelemetry extends MetricsRegistry, Closeable { +public interface MetricsTelemetry extends MetricsRegistry { } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java index 79b7e4aca6c2f..a3bb64ea392a9 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java @@ -85,6 +85,11 @@ public SpanScope withSpanInScope(Span span) { return DefaultSpanScope.create(span, tracerContextStorage).attach(); } + @Override + public boolean isRecording() { + return true; + } + private Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan) { return tracingTelemetry.createSpan(spanCreationContext, parentSpan); } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java index e6d4878a5e833..8257d251e9560 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java @@ -53,4 +53,10 @@ public interface Tracer extends HttpTracer, Closeable { */ SpanScope withSpanInScope(Span span); + /** + * Tells if the traces are being recorded or not + * @return boolean + */ + boolean isRecording(); + } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java index c073e8d3e766f..50452ff5fe3b4 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java @@ -54,6 +54,11 @@ public SpanScope withSpanInScope(Span span) { return SpanScope.NO_OP; } + @Override + public boolean isRecording() { + return false; + } + @Override public void close() { diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java index 0a717e993cb81..2a791f1ae4164 100644 --- a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java @@ -62,6 +62,7 @@ public void testCreateSpan() { String spanName = defaultTracer.getCurrentSpan().getSpan().getSpanName(); assertEquals("span_name", spanName); + assertTrue(defaultTracer.isRecording()); } @SuppressWarnings("unchecked") diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/IntegrationTestOTelTelemetryPlugin.java 
b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/IntegrationTestOTelTelemetryPlugin.java similarity index 85% rename from plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/IntegrationTestOTelTelemetryPlugin.java rename to plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/IntegrationTestOTelTelemetryPlugin.java index ed4d13f3abb7d..45caf8bf5f60b 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/IntegrationTestOTelTelemetryPlugin.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/IntegrationTestOTelTelemetryPlugin.java @@ -6,12 +6,9 @@ * compatible open source license. */ -package org.opensearch.telemetry.tracing; +package org.opensearch.telemetry; import org.opensearch.common.settings.Settings; -import org.opensearch.telemetry.OTelTelemetryPlugin; -import org.opensearch.telemetry.Telemetry; -import org.opensearch.telemetry.TelemetrySettings; import java.util.Optional; diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java new file mode 100644 index 0000000000000..74fc872cb30e3 --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import java.util.Collection; +import java.util.List; + +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricExporter; + +public class InMemorySingletonMetricsExporter implements MetricExporter { + + public static final InMemorySingletonMetricsExporter INSTANCE = new InMemorySingletonMetricsExporter(InMemoryMetricExporter.create()); + + private static InMemoryMetricExporter delegate; + + public static InMemorySingletonMetricsExporter create() { + return INSTANCE; + } + + private InMemorySingletonMetricsExporter(InMemoryMetricExporter delegate) { + InMemorySingletonMetricsExporter.delegate = delegate; + } + + @Override + public CompletableResultCode export(Collection metrics) { + return delegate.export(metrics); + } + + @Override + public CompletableResultCode flush() { + return delegate.flush(); + } + + @Override + public CompletableResultCode shutdown() { + return delegate.shutdown(); + } + + public List getFinishedMetricItems() { + return delegate.getFinishedMetricItems(); + } + + /** + * Clears the state. 
+ */ + public void reset() { + delegate.reset(); + } + + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return delegate.getAggregationTemporality(instrumentType); + } +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java new file mode 100644 index 0000000000000..bcdcb657c4f42 --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.noop.NoopCounter; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, minNumDataNodes = 1) +public class TelemetryMetricsDisabledSanityIT extends OpenSearchIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING.getKey(), false) + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.metrics.InMemorySingletonMetricsExporter" + ) + .put(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(IntegrationTestOTelTelemetryPlugin.class); + } + + @Override + protected boolean addMockTelemetryPlugin() { + return false; + } + + public void testSanityChecksWhenMetricsDisabled() throws Exception { + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + + Counter counter = metricsRegistry.createCounter("test-counter", "test", "1"); + counter.add(1.0); + + Thread.sleep(2000); + + assertTrue(metricsRegistry instanceof NoopMetricsRegistry); + assertTrue(counter instanceof NoopCounter); + } + +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java new file mode 100644 index 0000000000000..ed341595d327d --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.stream.Collectors; + +import io.opentelemetry.sdk.metrics.data.DoublePointData; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, minNumDataNodes = 1) +public class TelemetryMetricsEnabledSanityIT extends OpenSearchIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING.getKey(), true) + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.metrics.InMemorySingletonMetricsExporter" + ) + .put(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(IntegrationTestOTelTelemetryPlugin.class); + } + + @Override + protected boolean addMockTelemetryPlugin() { + return false; + } + + public void testCounter() throws Exception { + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + InMemorySingletonMetricsExporter.INSTANCE.reset(); + + Counter counter = metricsRegistry.createCounter("test-counter", "test", "1"); + counter.add(1.0); + // Sleep for about 2s to wait for metrics to be published. + Thread.sleep(2000); + + InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE; + double value = ((DoublePointData) ((ArrayList) exporter.getFinishedMetricItems() + .stream() + .filter(a -> a.getName().equals("test-counter")) + .collect(Collectors.toList()) + .get(0) + .getDoubleSumData() + .getPoints()).get(0)).getValue(); + assertEquals(1.0, value, 0.0); + } + + public void testUpDownCounter() throws Exception { + + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + InMemorySingletonMetricsExporter.INSTANCE.reset(); + + Counter counter = metricsRegistry.createUpDownCounter("test-up-down-counter", "test", "1"); + counter.add(1.0); + counter.add(-2.0); + // Sleep for about 2s to wait for metrics to be published. 
+ Thread.sleep(2000); + + InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE; + double value = ((DoublePointData) ((ArrayList) exporter.getFinishedMetricItems() + .stream() + .filter(a -> a.getName().equals("test-up-down-counter")) + .collect(Collectors.toList()) + .get(0) + .getDoubleSumData() + .getPoints()).get(0)).getValue(); + assertEquals(-1.0, value, 0.0); + } + + @After + public void reset() { + InMemorySingletonMetricsExporter.INSTANCE.reset(); + } +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java index 949a58f6cab41..45ed140e1be94 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java @@ -12,6 +12,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; import org.opensearch.telemetry.OTelTelemetrySettings; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.test.OpenSearchIntegTestCase; diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java index 8a49a0abf5512..f07f2b308e801 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java @@ -12,6 +12,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; import org.opensearch.telemetry.OTelTelemetrySettings; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.telemetry.tracing.attributes.Attributes; @@ -88,9 +89,7 @@ public void testSanityChecksWhenTracingEnabled() throws Exception { ); InMemorySingletonSpanExporter exporter = InMemorySingletonSpanExporter.INSTANCE; - if (!exporter.getFinishedSpanItems().isEmpty()) { - validators.validate(exporter.getFinishedSpanItems(), 6); - } + validators.validate(exporter.getFinishedSpanItems(), 6); } private static void updateTelemetrySetting(Client client, boolean value) { diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java index b57876c9310f3..297ae8873636f 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java @@ -8,14 +8,13 @@ package org.opensearch.telemetry; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.TelemetryPlugin; -import 
org.opensearch.telemetry.metrics.OTelMetricsTelemetry; import org.opensearch.telemetry.tracing.OTelResourceProvider; import org.opensearch.telemetry.tracing.OTelTelemetry; -import org.opensearch.telemetry.tracing.OTelTracingTelemetry; import java.util.Arrays; import java.util.List; @@ -37,6 +36,8 @@ public class OTelTelemetryPlugin extends Plugin implements TelemetryPlugin { private final Settings settings; + private RefCountedReleasable refCountedOpenTelemetry; + /** * Creates Otel plugin * @param settings cluster settings @@ -58,20 +59,32 @@ public List> getSettings() { @Override public Optional getTelemetry(TelemetrySettings telemetrySettings) { + initializeOpenTelemetrySdk(telemetrySettings); return Optional.of(telemetry(telemetrySettings)); } + private void initializeOpenTelemetrySdk(TelemetrySettings telemetrySettings) { + if (refCountedOpenTelemetry != null) { + return; + } + OpenTelemetrySdk openTelemetrySdk = OTelResourceProvider.get(telemetrySettings, settings); + refCountedOpenTelemetry = new RefCountedReleasable<>("openTelemetry", openTelemetrySdk, openTelemetrySdk::close); + } + @Override public String getName() { return OTEL_TRACER_NAME; } private Telemetry telemetry(TelemetrySettings telemetrySettings) { - final OpenTelemetrySdk openTelemetry = OTelResourceProvider.get(telemetrySettings, settings); - return new OTelTelemetry( - new OTelTracingTelemetry<>(openTelemetry, openTelemetry.getSdkTracerProvider()), - new OTelMetricsTelemetry<>(openTelemetry.getSdkMeterProvider()) - ); + return new OTelTelemetry(refCountedOpenTelemetry); + } + + @Override + public void close() { + if (refCountedOpenTelemetry != null) { + refCountedOpenTelemetry.close(); + } } } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java index 8598e5976d20d..6160e5106c041 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java @@ -8,6 +8,7 @@ package org.opensearch.telemetry.metrics; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.telemetry.OTelTelemetryPlugin; import java.io.Closeable; @@ -19,19 +20,24 @@ import io.opentelemetry.api.metrics.DoubleUpDownCounter; import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.metrics.MeterProvider; +import io.opentelemetry.sdk.OpenTelemetrySdk; /** * OTel implementation for {@link MetricsTelemetry} */ public class OTelMetricsTelemetry implements MetricsTelemetry { + private final RefCountedReleasable refCountedOpenTelemetry; private final Meter otelMeter; private final T meterProvider; /** * Creates OTel based {@link MetricsTelemetry}. + * @param openTelemetry open telemetry. 
* @param meterProvider {@link MeterProvider} instance */ - public OTelMetricsTelemetry(T meterProvider) { + public OTelMetricsTelemetry(RefCountedReleasable openTelemetry, T meterProvider) { + this.refCountedOpenTelemetry = openTelemetry; + this.refCountedOpenTelemetry.incRef(); this.meterProvider = meterProvider; this.otelMeter = meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME); } @@ -63,5 +69,6 @@ public Counter createUpDownCounter(String name, String description, String unit) @Override public void close() throws IOException { meterProvider.close(); + refCountedOpenTelemetry.close(); } } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java index 282fabd43346b..0c697d2cc5e8c 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java @@ -8,34 +8,39 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.metrics.MetricsTelemetry; +import org.opensearch.telemetry.metrics.OTelMetricsTelemetry; + +import io.opentelemetry.sdk.OpenTelemetrySdk; /** * Otel implementation of Telemetry */ public class OTelTelemetry implements Telemetry { - private final TracingTelemetry tracingTelemetry; - private final MetricsTelemetry metricsTelemetry; + private final RefCountedReleasable refCountedOpenTelemetry; /** * Creates Telemetry instance - * @param tracingTelemetry tracing telemetry - * @param metricsTelemetry metrics telemetry + + */ + /** + * Creates Telemetry instance + * @param refCountedOpenTelemetry open telemetry. 
*/ - public OTelTelemetry(TracingTelemetry tracingTelemetry, MetricsTelemetry metricsTelemetry) { - this.tracingTelemetry = tracingTelemetry; - this.metricsTelemetry = metricsTelemetry; + public OTelTelemetry(RefCountedReleasable refCountedOpenTelemetry) { + this.refCountedOpenTelemetry = refCountedOpenTelemetry; } @Override public TracingTelemetry getTracingTelemetry() { - return tracingTelemetry; + return new OTelTracingTelemetry<>(refCountedOpenTelemetry, refCountedOpenTelemetry.get().getSdkTracerProvider()); } @Override public MetricsTelemetry getMetricsTelemetry() { - return metricsTelemetry; + return new OTelMetricsTelemetry<>(refCountedOpenTelemetry, refCountedOpenTelemetry.get().getSdkMeterProvider()); } } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java index f88afe623fd56..af39617a8c744 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java @@ -8,31 +8,33 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.telemetry.OTelAttributesConverter; import org.opensearch.telemetry.OTelTelemetryPlugin; import java.io.Closeable; import java.io.IOException; -import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.api.trace.TracerProvider; import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.OpenTelemetrySdk; /** * OTel based Telemetry provider */ public class OTelTracingTelemetry implements TracingTelemetry { - private final OpenTelemetry openTelemetry; + private final RefCountedReleasable refCountedOpenTelemetry; private final T tracerProvider; private final io.opentelemetry.api.trace.Tracer otelTracer; /** * Creates OTel based {@link TracingTelemetry} - * @param openTelemetry OpenTelemetry instance + * @param refCountedOpenTelemetry OpenTelemetry instance * @param tracerProvider {@link TracerProvider} instance. 
*/ - public OTelTracingTelemetry(OpenTelemetry openTelemetry, T tracerProvider) { - this.openTelemetry = openTelemetry; + public OTelTracingTelemetry(RefCountedReleasable refCountedOpenTelemetry, T tracerProvider) { + this.refCountedOpenTelemetry = refCountedOpenTelemetry; + this.refCountedOpenTelemetry.incRef(); this.tracerProvider = tracerProvider; this.otelTracer = tracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME); } @@ -40,6 +42,7 @@ public OTelTracingTelemetry(OpenTelemetry openTelemetry, T tracerProvider) { @Override public void close() throws IOException { tracerProvider.close(); + refCountedOpenTelemetry.close(); } @Override @@ -49,7 +52,7 @@ public Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan) @Override public TracingContextPropagator getContextPropagator() { - return new OTelTracingContextPropagator(openTelemetry); + return new OTelTracingContextPropagator(refCountedOpenTelemetry.get()); } private Span createOtelSpan(SpanCreationContext spanCreationContext, Span parentSpan) { diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java index 233c93e6b9a36..9de575b69774a 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java @@ -8,11 +8,13 @@ package org.opensearch.telemetry.metrics; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.telemetry.OTelAttributesConverter; import org.opensearch.telemetry.OTelTelemetryPlugin; import org.opensearch.telemetry.metrics.tags.Tags; import org.opensearch.test.OpenSearchTestCase; +import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.api.metrics.DoubleCounter; import io.opentelemetry.api.metrics.DoubleCounterBuilder; import io.opentelemetry.api.metrics.DoubleUpDownCounter; @@ -34,12 +36,16 @@ public void testCounter() { String description = "test"; String unit = "1"; Meter mockMeter = mock(Meter.class); + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); DoubleCounter mockOTelDoubleCounter = mock(DoubleCounter.class); LongCounterBuilder mockOTelLongCounterBuilder = mock(LongCounterBuilder.class); DoubleCounterBuilder mockOTelDoubleCounterBuilder = mock(DoubleCounterBuilder.class); MeterProvider meterProvider = mock(MeterProvider.class); when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); - MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry(meterProvider); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); when(mockMeter.counterBuilder(counterName)).thenReturn(mockOTelLongCounterBuilder); when(mockOTelLongCounterBuilder.setDescription(description)).thenReturn(mockOTelLongCounterBuilder); when(mockOTelLongCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongCounterBuilder); @@ -59,6 +65,7 @@ public void testCounterNegativeValue() { String counterName = "test-counter"; String description = "test"; String unit = "1"; + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); Meter mockMeter = mock(Meter.class); DoubleCounter mockOTelDoubleCounter = mock(DoubleCounter.class); LongCounterBuilder mockOTelLongCounterBuilder = mock(LongCounterBuilder.class); @@ -66,7 +73,10 @@ 
public void testCounterNegativeValue() { MeterProvider meterProvider = mock(MeterProvider.class); when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); - MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry(meterProvider); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); when(mockMeter.counterBuilder(counterName)).thenReturn(mockOTelLongCounterBuilder); when(mockOTelLongCounterBuilder.setDescription(description)).thenReturn(mockOTelLongCounterBuilder); when(mockOTelLongCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongCounterBuilder); @@ -83,6 +93,7 @@ public void testUpDownCounter() { String counterName = "test-counter"; String description = "test"; String unit = "1"; + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); Meter mockMeter = mock(Meter.class); DoubleUpDownCounter mockOTelUpDownDoubleCounter = mock(DoubleUpDownCounter.class); LongUpDownCounterBuilder mockOTelLongUpDownCounterBuilder = mock(LongUpDownCounterBuilder.class); @@ -90,7 +101,10 @@ public void testUpDownCounter() { MeterProvider meterProvider = mock(MeterProvider.class); when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); - MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry(meterProvider); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); when(mockMeter.upDownCounterBuilder(counterName)).thenReturn(mockOTelLongUpDownCounterBuilder); when(mockOTelLongUpDownCounterBuilder.setDescription(description)).thenReturn(mockOTelLongUpDownCounterBuilder); when(mockOTelLongUpDownCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongUpDownCounterBuilder); diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java index 1a508ed252493..1f0c2f674e655 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java @@ -8,6 +8,7 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.telemetry.OTelTelemetryPlugin; import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.test.OpenSearchTestCase; @@ -37,7 +38,10 @@ public void testCreateSpanWithoutParent() { when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class)); when(mockSpanBuilder.setSpanKind(any(io.opentelemetry.api.trace.SpanKind.class))).thenReturn(mockSpanBuilder); Attributes attributes = Attributes.create().addAttribute("name", "value"); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry, mockTracerProvider); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), null); verify(mockSpanBuilder, never()).setParent(any()); verify(mockSpanBuilder).setAllAttributes(createAttribute(attributes)); @@ -59,7 +63,10 @@ public void testCreateSpanWithParent() { Span parentSpan = new 
OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry, mockTracerProvider); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); Attributes attributes = Attributes.create().addAttribute("name", 1l); Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), parentSpan); @@ -85,7 +92,10 @@ public void testCreateSpanWithParentWithMultipleAttributes() { Span parentSpan = new OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry, mockTracerProvider); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); Attributes attributes = Attributes.create() .addAttribute("key1", 1l) .addAttribute("key2", 2.0) @@ -125,7 +135,10 @@ public void testGetContextPropagator() { TracerProvider mockTracerProvider = mock(TracerProvider.class); when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry, mockTracerProvider); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); assertTrue(tracingTelemetry.getContextPropagator() instanceof OTelTracingContextPropagator); } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index bb0910c4e5f9c..90f91dcb7c553 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -700,6 +700,12 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING ), List.of(FeatureFlags.TELEMETRY), - List.of(TelemetrySettings.TRACER_ENABLED_SETTING, TelemetrySettings.TRACER_SAMPLER_PROBABILITY) + List.of( + TelemetrySettings.TRACER_ENABLED_SETTING, + TelemetrySettings.TRACER_SAMPLER_PROBABILITY, + TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING, + TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING, + TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING + ) ); } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index c456f01135dee..69b80462bbf0b 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -600,10 +600,23 @@ protected Node( MetricsRegistryFactory metricsRegistryFactory; if (FeatureFlags.isEnabled(TELEMETRY)) { final TelemetrySettings telemetrySettings = new TelemetrySettings(settings, clusterService.getClusterSettings()); - List telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class); - TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, telemetrySettings); - tracerFactory = new TracerFactory(telemetrySettings, telemetryModule.getTelemetry(), threadPool.getThreadContext()); - metricsRegistryFactory = new MetricsRegistryFactory(telemetrySettings, telemetryModule.getTelemetry()); + if (telemetrySettings.isTracingFeatureEnabled() || 
telemetrySettings.isMetricsFeatureEnabled()) { + List telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class); + TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, telemetrySettings); + if (telemetrySettings.isTracingFeatureEnabled()) { + tracerFactory = new TracerFactory(telemetrySettings, telemetryModule.getTelemetry(), threadPool.getThreadContext()); + } else { + tracerFactory = new NoopTracerFactory(); + } + if (telemetrySettings.isMetricsFeatureEnabled()) { + metricsRegistryFactory = new MetricsRegistryFactory(telemetrySettings, telemetryModule.getTelemetry()); + } else { + metricsRegistryFactory = new NoopMetricsRegistryFactory(); + } + } else { + tracerFactory = new NoopTracerFactory(); + metricsRegistryFactory = new NoopMetricsRegistryFactory(); + } } else { tracerFactory = new NoopTracerFactory(); metricsRegistryFactory = new NoopMetricsRegistryFactory(); diff --git a/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java index edb20cfa9dfc5..24dcab98c8870 100644 --- a/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java +++ b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java @@ -28,6 +28,20 @@ public class TelemetrySettings { Setting.Property.Dynamic ); + public static final Setting TRACER_FEATURE_ENABLED_SETTING = Setting.boolSetting( + "telemetry.feature.tracer.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Final + ); + + public static final Setting METRICS_FEATURE_ENABLED_SETTING = Setting.boolSetting( + "telemetry.feature.metrics.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Final + ); + /** * Probability of sampler */ @@ -53,9 +67,14 @@ public class TelemetrySettings { private volatile boolean tracingEnabled; private volatile double samplingProbability; + private final boolean tracingFeatureEnabled; + private final boolean metricsFeatureEnabled; + public TelemetrySettings(Settings settings, ClusterSettings clusterSettings) { this.tracingEnabled = TRACER_ENABLED_SETTING.get(settings); this.samplingProbability = TRACER_SAMPLER_PROBABILITY.get(settings); + this.tracingFeatureEnabled = TRACER_FEATURE_ENABLED_SETTING.get(settings); + this.metricsFeatureEnabled = METRICS_FEATURE_ENABLED_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(TRACER_ENABLED_SETTING, this::setTracingEnabled); clusterSettings.addSettingsUpdateConsumer(TRACER_SAMPLER_PROBABILITY, this::setSamplingProbability); @@ -83,4 +102,12 @@ public void setSamplingProbability(double samplingProbability) { public double getSamplingProbability() { return samplingProbability; } + + public boolean isTracingFeatureEnabled() { + return tracingFeatureEnabled; + } + + public boolean isMetricsFeatureEnabled() { + return metricsFeatureEnabled; + } } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java b/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java index b2308402379ac..631fb8242d78e 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java @@ -59,6 +59,11 @@ public SpanScope withSpanInScope(Span span) { return getDelegateTracer().withSpanInScope(span); } + @Override + public boolean isRecording() { + return getDelegateTracer().isRecording(); + } + @Override public void close() throws IOException { defaultTracer.close(); diff --git 
a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java index 0a9757310fe8b..e0fb690bd29be 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java @@ -8,7 +8,6 @@ package org.opensearch.telemetry.tracing.channels; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpResponse; @@ -50,7 +49,7 @@ private TraceableHttpChannel(HttpChannel delegate, Span span, Tracer tracer) { * @return http channel */ public static HttpChannel create(HttpChannel delegate, Span span, Tracer tracer) { - if (FeatureFlags.isEnabled(FeatureFlags.TELEMETRY) == true) { + if (tracer.isRecording() == true) { return new TraceableHttpChannel(delegate, span, tracer); } else { return delegate; diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java index d256c9d4d0e53..32769dd1d848d 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java @@ -9,7 +9,6 @@ package org.opensearch.telemetry.tracing.channels; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.RestChannel; @@ -53,7 +52,7 @@ private TraceableRestChannel(RestChannel delegate, Span span, Tracer tracer) { * @return rest channel */ public static RestChannel create(RestChannel delegate, Span span, Tracer tracer) { - if (FeatureFlags.isEnabled(FeatureFlags.TELEMETRY) == true) { + if (tracer.isRecording() == true) { return new TraceableRestChannel(delegate, span, tracer); } else { return delegate; diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java index bd60c35c3baac..45268b4807cd9 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java @@ -9,7 +9,6 @@ package org.opensearch.telemetry.tracing.channels; import org.opensearch.Version; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.core.transport.TransportResponse; import org.opensearch.telemetry.tracing.Span; @@ -53,7 +52,7 @@ public TraceableTcpTransportChannel(TcpTransportChannel delegate, Span span, Tra * @return transport channel */ public static TransportChannel create(TcpTransportChannel delegate, final Span span, final Tracer tracer) { - if (FeatureFlags.isEnabled(FeatureFlags.TELEMETRY) == true) { + if (tracer.isRecording() == true) { delegate.getChannel().addCloseListener(new ActionListener() { @Override public void onResponse(Void unused) { diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java 
b/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java index 538bf82a1dbec..eb9d53d2df51b 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java @@ -8,7 +8,6 @@ package org.opensearch.telemetry.tracing.handler; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.transport.TransportResponse; import org.opensearch.telemetry.tracing.Span; @@ -55,7 +54,7 @@ public static TransportResponseHandler create( Span span, Tracer tracer ) { - if (FeatureFlags.isEnabled(FeatureFlags.TELEMETRY) == true) { + if (tracer.isRecording() == true) { return new TraceableTransportResponseHandler(delegate, span, tracer); } else { return delegate; diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java index 3e201641a529b..0cb4ce71d05f8 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java @@ -8,7 +8,6 @@ package org.opensearch.telemetry.tracing.listener; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.telemetry.tracing.Span; import org.opensearch.telemetry.tracing.SpanScope; @@ -47,7 +46,7 @@ private TraceableActionListener(ActionListener delegate, Span span, Tr * @return action listener */ public static ActionListener create(ActionListener delegate, Span span, Tracer tracer) { - if (FeatureFlags.isEnabled(FeatureFlags.TELEMETRY) == true) { + if (tracer.isRecording() == true) { return new TraceableActionListener(delegate, span, tracer); } else { return delegate; diff --git a/server/src/test/java/org/opensearch/telemetry/metrics/MetricsRegistryFactoryTests.java b/server/src/test/java/org/opensearch/telemetry/metrics/MetricsRegistryFactoryTests.java index 5d5ea62dd161e..80942123fd4fd 100644 --- a/server/src/test/java/org/opensearch/telemetry/metrics/MetricsRegistryFactoryTests.java +++ b/server/src/test/java/org/opensearch/telemetry/metrics/MetricsRegistryFactoryTests.java @@ -63,6 +63,18 @@ public void testGetMetricsWithAvailableMetricsTelemetry() { } + public void testNullMetricsTelemetry() { + Settings settings = Settings.builder().put(TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING.getKey(), false).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getMetricsTelemetry()).thenReturn(null); + metricsRegistryFactory = new MetricsRegistryFactory(telemetrySettings, Optional.of(mockTelemetry)); + + MetricsRegistry metricsRegistry = metricsRegistryFactory.getMetricsRegistry(); + assertTrue(metricsRegistry instanceof NoopMetricsRegistry); + + } + private Set> getClusterSettings() { Set> allTracerSettings = new HashSet<>(); ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach((allTracerSettings::add)); diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java 
index b27f888eaf502..3a388be22445e 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java @@ -83,6 +83,18 @@ public void testGetTracerWithAvailableTracingTelemetryReturnsWrappedTracer() { } + public void testNullTracer() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), false).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getTracingTelemetry()).thenReturn(null); + tracerFactory = new TracerFactory(telemetrySettings, Optional.of(mockTelemetry), new ThreadContext(Settings.EMPTY)); + + Tracer tracer = tracerFactory.getTracer(); + assertTrue(tracer instanceof NoopTracer); + + } + private Set> getClusterSettings() { Set> allTracerSettings = new HashSet<>(); ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach((allTracerSettings::add)); diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java index 43e0cb8e44439..8606104d26103 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java @@ -26,6 +26,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class WrappedTracerTests extends OpenSearchTestCase { @@ -38,6 +39,7 @@ public void testStartSpanWithTracingDisabledInvokesNoopTracer() throws Exception SpanCreationContext spanCreationContext = SpanCreationContext.internal().name("foo"); wrappedTracer.startSpan(spanCreationContext); assertTrue(wrappedTracer.getDelegateTracer() instanceof NoopTracer); + assertFalse(wrappedTracer.isRecording()); verify(mockDefaultTracer, never()).startSpan(SpanCreationContext.internal().name("foo")); } } @@ -46,12 +48,13 @@ public void testStartSpanWithTracingEnabledInvokesDefaultTracer() throws Excepti Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true).build(); TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); DefaultTracer mockDefaultTracer = mock(DefaultTracer.class); - + when(mockDefaultTracer.isRecording()).thenReturn(true); try (WrappedTracer wrappedTracer = new WrappedTracer(telemetrySettings, mockDefaultTracer)) { SpanCreationContext spanCreationContext = SpanCreationContext.internal().name("foo"); wrappedTracer.startSpan(spanCreationContext); assertTrue(wrappedTracer.getDelegateTracer() instanceof DefaultTracer); + assertTrue(wrappedTracer.isRecording()); verify(mockDefaultTracer).startSpan(eq(spanCreationContext)); } } diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index da829a3bc5225..c16cc1d2a5fba 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -1928,6 +1928,7 @@ protected Settings nodeSettings(int nodeOrdinal) { // Enable tracer only when Telemetry Setting is 
enabled if (featureFlagSettings().getAsBoolean(FeatureFlags.TELEMETRY_SETTING.getKey(), false)) { + builder.put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), true); builder.put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true); } if (FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING.get(featureFlagSettings)) { diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java index f14fe3bf3961c..efc29d1c254e6 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java @@ -254,6 +254,7 @@ private Node newNode() { .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), nodeName) .put(FeatureFlags.TELEMETRY_SETTING.getKey(), true) .put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true) + .put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), true) .put(nodeSettings()) // allow test cases to provide their own settings or override these .put(featureFlagSettings); if (FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING.get(featureFlagSettings)) { @@ -271,6 +272,7 @@ private Node newNode() { plugins.add(MockHttpTransport.TestPlugin.class); } plugins.add(MockScriptService.TestPlugin.class); + plugins.add(MockTelemetryPlugin.class); Node node = new MockNode(settingsBuilder.build(), plugins, forbidPrivateIndexSettings()); try { diff --git a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java index 95321a7009be9..dda413ce2818e 100644 --- a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java +++ b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java @@ -12,6 +12,7 @@ import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.telemetry.metrics.Counter; import org.opensearch.telemetry.metrics.MetricsTelemetry; +import org.opensearch.telemetry.metrics.noop.NoopCounter; import org.opensearch.telemetry.tracing.TracingTelemetry; import org.opensearch.test.telemetry.tracing.MockTracingTelemetry; @@ -37,12 +38,12 @@ public MetricsTelemetry getMetricsTelemetry() { return new MetricsTelemetry() { @Override public Counter createCounter(String name, String description, String unit) { - return null; + return NoopCounter.INSTANCE; } @Override public Counter createUpDownCounter(String name, String description, String unit) { - return null; + return NoopCounter.INSTANCE; } @Override From 8b04521ffb8b5be312d1814a208560d53f4c8ae7 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Tue, 17 Oct 2023 16:00:35 -0700 Subject: [PATCH 027/178] Fix flaky test testFlushThrowsFlushFailedExceptionOnCorruption. (#10671) This test is flaky in two ways. First, it may hit a case where a merge has occurred and the hardcoded segment does not exist to delete. Second, with certain mock filesystems a corruption exception won't be thrown here, only a NoSuchFileException. This doesn't change our handling in the engine, but the test should conditionally handle whether the store is marked corrupted on close.
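In outline (a sketch with abbreviated names; the actual test below uses nrtEngine/nrtEngineStore): delete any existing segments-info file rather than a hardcoded one, then only require a throwing close when the store really was marked corrupted:

    String si = Set.of(store.directory().listAll()).stream().filter(f -> f.endsWith(".si")).findAny().orElseThrow();
    store.directory().deleteFile(si);
    assertThrows(FlushFailedEngineException.class, engine::flush);
    engine.close();
    if (store.isMarkedCorrupted()) {
        assertThrows(RuntimeException.class, store::close); // a corruption marker makes close() throw
    } else {
        store.close(); // the NoSuchFileException path is not treated as corruption
    }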
Signed-off-by: Marc Handalian --- .../index/engine/NRTReplicationEngineTests.java | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java index 09484cd1b5840..57509c5daa2b1 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -617,11 +617,18 @@ public void testFlushThrowsFlushFailedExceptionOnCorruption() throws Exception { indexOperations(nrtEngine, operations); // wipe the nrt directory initially so we can sync with primary. cleanAndCopySegmentsFromPrimary(nrtEngine); - nrtEngineStore.directory().deleteFile("_0.si"); + final Optional<String> toDelete = Set.of(nrtEngineStore.directory().listAll()).stream().filter(f -> f.endsWith(".si")).findAny(); + assertTrue(toDelete.isPresent()); + nrtEngineStore.directory().deleteFile(toDelete.get()); assertThrows(FlushFailedEngineException.class, nrtEngine::flush); - assertTrue(nrtEngineStore.isMarkedCorrupted()); - // store will throw when eventually closed, not handled here. - assertThrows(RuntimeException.class, nrtEngineStore::close); + nrtEngine.close(); + if (nrtEngineStore.isMarkedCorrupted()) { + assertThrows(RuntimeException.class, nrtEngineStore::close); + } else { + // With certain mock directories a NoSuchFileException is thrown which is not treated as a + // corruption Exception. In these cases we don't expect any issue on store close. + nrtEngineStore.close(); + } } private void copySegments(Collection<String> latestPrimaryFiles, Engine nrtEngine) throws IOException { From 9cc0e7db3b5dfaa9b3dd111eba9d1fb4d9185549 Mon Sep 17 00:00:00 2001 From: Dhwanil Patel Date: Wed, 18 Oct 2023 07:26:36 +0530 Subject: [PATCH 028/178] Change filenames for IndexMetadata and Manifest (#10557) * Change filenames for IndexMetadata and Manifest Signed-off-by: Dhwanil Patel --- CHANGELOG.md | 1 + .../remote/RemoteClusterStateService.java | 38 ++++++++++++------- .../RemoteClusterStateServiceTests.java | 38 +++++++++++++++++++ 3 files changed, 63 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e379002d254ae..3471564dba15e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) - [Admission control] Add enhancements to FS stats to include read/write time, queue size and IO time ([#10541](https://github.com/opensearch-project/OpenSearch/pull/10541)) - [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) +- Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 4a8a0618ffa60..0cf97de53d5f3 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -120,6 +120,9 @@ public class RemoteClusterStateService implements
Closeable { private final AtomicBoolean deleteStaleMetadataRunning = new AtomicBoolean(false); + public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 1; + public static final int MANIFEST_CURRENT_CODEC_VERSION = 1; + public RemoteClusterStateService( String nodeId, Supplier<RepositoriesService> repositoriesService, @@ -426,7 +429,7 @@ private ClusterMetadataManifest uploadManifest( boolean committed ) throws IOException { synchronized (this) { - final String manifestFileName = getManifestFileName(clusterState.term(), clusterState.version()); + final String manifestFileName = getManifestFileName(clusterState.term(), clusterState.version(), committed); final ClusterMetadataManifest manifest = new ClusterMetadataManifest( clusterState.term(), clusterState.getVersion(), @@ -488,22 +491,30 @@ private void setSlowWriteLoggingThreshold(TimeValue slowWriteLoggingThreshold) { this.slowWriteLoggingThreshold = slowWriteLoggingThreshold; } - private static String getManifestFileName(long term, long version) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest_2147483642_2147483637_456536447 - return String.join(DELIMITER, getManifestFileNamePrefix(term, version), RemoteStoreUtils.invertLong(System.currentTimeMillis())); - } - - private static String getManifestFileNamePrefix(long term, long version) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest_2147483642_2147483637 - return String.join(DELIMITER, MANIFEST_PATH_TOKEN, RemoteStoreUtils.invertLong(term), RemoteStoreUtils.invertLong(version)); + static String getManifestFileName(long term, long version, boolean committed) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest__<inverted_term>__<inverted_version>__C/P__<inverted_timestamp>__<codec_version> + return String.join( + DELIMITER, + MANIFEST_PATH_TOKEN, + RemoteStoreUtils.invertLong(term), + RemoteStoreUtils.invertLong(version), + (committed ? "C" : "P"), // C for committed and P for published + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(MANIFEST_CURRENT_CODEC_VERSION) // Keep the codec version at the last place only; during read we read the last place to + // determine the codec version. + ); } - private static String indexMetadataFileName(IndexMetadata indexMetadata) { + static String indexMetadataFileName(IndexMetadata indexMetadata) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index/<index_UUID>/metadata__<inverted_index_metadata_version>__<inverted_timestamp>__<codec_version> return String.join( DELIMITER, INDEX_METADATA_FILE_PREFIX, - String.valueOf(indexMetadata.getVersion()), - String.valueOf(System.currentTimeMillis()) + RemoteStoreUtils.invertLong(indexMetadata.getVersion()), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(INDEX_METADATA_CURRENT_CODEC_VERSION) // Keep the codec version at the last place only; during read we read the last + // place to determine the codec version.
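+ // Note: RemoteStoreUtils.invertLong writes Long.MAX_VALUE - value, zero-padded to a fixed width, so a plain lexicographic sort of these blob names puts the highest term/version and the newest timestamp first; that is what allows the latest metadata file to be picked straight from a sorted listing.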
); } @@ -916,8 +927,7 @@ private void deleteClusterMetadata( if (filesToKeep.contains(uploadedIndexMetadata.getUploadedFilename()) == false) { staleIndexMetadataPaths.add( new BlobPath().add(INDEX_PATH_TOKEN).add(uploadedIndexMetadata.getIndexUUID()).buildAsString() - + uploadedIndexMetadata.getUploadedFilename() - + ".dat" + + INDEX_METADATA_FORMAT.blobName(uploadedIndexMetadata.getUploadedFilename()) ); } }); diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 6ecbc23f75bee..119d19cc34981 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -34,6 +34,7 @@ import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; +import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.repositories.FilterRepository; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryMissingException; @@ -65,6 +66,9 @@ import org.mockito.ArgumentMatchers; import static org.opensearch.gateway.remote.RemoteClusterStateService.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_METADATA_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_METADATA_FILE_PREFIX; +import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_CURRENT_CODEC_VERSION; import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_FILE_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; @@ -673,6 +677,40 @@ public void testDeleteStaleClusterUUIDs() throws IOException { } } + public void testFileNames() { + final Index index = new Index("test-index", "index-uuid"); + final Settings idxSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build(); + final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + + String indexMetadataFileName = RemoteClusterStateService.indexMetadataFileName(indexMetadata); + String[] splittedIndexMetadataFileName = indexMetadataFileName.split(DELIMITER); + assertThat(indexMetadataFileName.split(DELIMITER).length, is(4)); + assertThat(splittedIndexMetadataFileName[0], is(INDEX_METADATA_FILE_PREFIX)); + assertThat(splittedIndexMetadataFileName[1], is(RemoteStoreUtils.invertLong(indexMetadata.getVersion()))); + assertThat(splittedIndexMetadataFileName[3], is(String.valueOf(INDEX_METADATA_CURRENT_CODEC_VERSION))); + + int term = randomIntBetween(5, 10); + int version = randomIntBetween(5, 10); + String manifestFileName = RemoteClusterStateService.getManifestFileName(term, version, true); + assertThat(manifestFileName.split(DELIMITER).length, is(6)); + String[] splittedName = manifestFileName.split(DELIMITER); + assertThat(splittedName[0], is(MANIFEST_FILE_PREFIX)); + assertThat(splittedName[1], is(RemoteStoreUtils.invertLong(term))); + 
assertThat(splittedName[2], is(RemoteStoreUtils.invertLong(version))); + assertThat(splittedName[3], is("C")); + assertThat(splittedName[5], is(String.valueOf(MANIFEST_CURRENT_CODEC_VERSION))); + + manifestFileName = RemoteClusterStateService.getManifestFileName(term, version, false); + splittedName = manifestFileName.split(DELIMITER); + assertThat(splittedName[3], is("P")); + } + private void mockObjectsForGettingPreviousClusterUUID(Map<String, String> clusterUUIDsPointers) throws IOException { final BlobPath blobPath = mock(BlobPath.class); when((blobStoreRepository.basePath())).thenReturn(blobPath); From 8276cbdf0749c4bcb62a53312ea6009a830f1e41 Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Wed, 18 Oct 2023 17:11:06 +0530 Subject: [PATCH 029/178] revive remote cluster state auto restore integ tests (#10503) * revive remote cluster state auto restore integ tests Signed-off-by: bansvaru --- .../RemoteStoreBaseIntegTestCase.java | 8 +- .../RemoteStoreClusterStateRestoreIT.java | 231 ++++++------------ .../opensearch/test/InternalTestCluster.java | 10 +- 3 files changed, 82 insertions(+), 167 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index e2ef5f85abc74..bccca283ba772 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -353,7 +353,13 @@ public void assertRemoteStoreRepositoryOnAllNodes(String repositoryName) { // Validated that all the restricted settings are intact on all the nodes. repository.getRestrictedSystemRepositorySettings() .stream() - .forEach(setting -> assertEquals(setting.get(actualRepository.settings()), setting.get(expectedRepository.settings()))); + .forEach( + setting -> assertEquals( + String.format(Locale.ROOT, "Restricted Settings mismatch [%s]", setting.getKey()), + setting.get(actualRepository.settings()), + setting.get(expectedRepository.settings()) + ) + ); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index 5e92bb195680b..3df4cc4e34d93 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -8,23 +8,22 @@ package org.opensearch.remotestore; -import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; -import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.settings.Settings; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; import java.nio.file.Files; -import java.util.Locale; +import java.util.List; import java.util.Map; import java.util.Objects; -import
java.util.concurrent.ExecutionException; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; -import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; -import static org.opensearch.indices.ShardLimitValidator.SETTING_MAX_SHARDS_PER_CLUSTER_KEY; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreClusterStateRestoreIT extends BaseRemoteStoreRestoreIT { @@ -48,47 +47,10 @@ private Map initialTestSetup(int shardCount, int replicaCount, int private void resetCluster(int dataNodeCount, int clusterManagerNodeCount) { internalCluster().stopAllNodes(); - addNewNodes(dataNodeCount, clusterManagerNodeCount); + internalCluster().startClusterManagerOnlyNodes(clusterManagerNodeCount); + internalCluster().startDataOnlyNodes(dataNodeCount); } - private void restoreAndValidate(String clusterUUID, Map indexStats) throws Exception { - restoreAndValidate(clusterUUID, indexStats, true); - } - - private void restoreAndValidate(String clusterUUID, Map indexStats, boolean validate) throws Exception { - // TODO once auto restore is merged, the remote cluster state will be restored - - if (validate) { - // Step - 4 validation restore is successful. - ensureGreen(INDEX_NAME); - verifyRestoredData(indexStats, INDEX_NAME); - } - } - - private void restoreAndValidateFails( - String clusterUUID, - PlainActionFuture actionListener, - Class clazz, - String errorSubString - ) { - - try { - restoreAndValidate(clusterUUID, null, false); - } catch (Exception e) { - assertTrue( - String.format(Locale.ROOT, "%s %s", clazz, e), - clazz.isAssignableFrom(e.getClass()) - || clazz.isAssignableFrom(e.getCause().getClass()) - || (e.getCause().getCause() != null && clazz.isAssignableFrom(e.getCause().getCause().getClass())) - ); - assertTrue( - String.format(Locale.ROOT, "Error message mismatch. Expected: [%s]. Actual: [%s]", errorSubString, e.getMessage()), - e.getMessage().contains(errorSubString) - ); - } - } - - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") public void testFullClusterRestore() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; @@ -106,10 +68,10 @@ public void testFullClusterRestore() throws Exception { assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; // Step - 3 Trigger full cluster restore and validate - restoreAndValidate(prevClusterUUID, indexStats); + validateMetadata(List.of(INDEX_NAME)); + verifyRestoredData(indexStats, INDEX_NAME); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") public void testFullClusterRestoreMultipleIndices() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; @@ -134,155 +96,100 @@ public void testFullClusterRestoreMultipleIndices() throws Exception { assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. 
cluster uuid is same"; // Step - 3 Trigger full cluster restore - restoreAndValidate(prevClusterUUID, indexStats); - ensureGreen(secondIndexName); - verifyRestoredData(indexStats2, secondIndexName); + validateMetadata(List.of(INDEX_NAME, secondIndexName)); + verifyRestoredData(indexStats, INDEX_NAME); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") - public void testFullClusterRestoreFailureValidationFailures() throws Exception { + public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathThrowsException() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; int dataNodeCount = shardCount * (replicaCount + 1); int clusterManagerNodeCount = 1; - // index some data to generate files in remote directory - Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); - String prevClusterUUID = clusterService().state().metadata().clusterUUID(); - - // Start of Test - 1 - // Test - 1 Trigger full cluster restore and validate it fails due to incorrect cluster UUID - PlainActionFuture future = PlainActionFuture.newFuture(); - restoreAndValidateFails("randomUUID", future, IllegalStateException.class, "Remote Cluster State not found - randomUUID"); - // End of Test - 1 + // Step - 1 index some data to generate files in remote directory + initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); - // Start of Test - 3 - // Test - 2 Trigger full cluster restore and validate it fails due to cluster UUID same as current cluster UUID - future = PlainActionFuture.newFuture(); - restoreAndValidateFails( - clusterService().state().metadata().clusterUUID(), - future, - IllegalArgumentException.class, - "clusterUUID to restore from should be different from current cluster UUID" - ); - // End of Test - 2 + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + String clusterName = clusterService().state().getClusterName().value(); - // Start of Test - 3 // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata - // Restarting cluster with just 1 data node helps with applying cluster settings - resetCluster(1, clusterManagerNodeCount); - String newClusterUUID = clusterService().state().metadata().clusterUUID(); - assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; - - reduceShardLimits(1, 1); - - // Step - 4 Trigger full cluster restore and validate it fails - future = PlainActionFuture.newFuture(); - restoreAndValidateFails( - prevClusterUUID, - future, - IllegalArgumentException.class, - "this action would add [2] total shards, but this cluster currently has [0]/[1] maximum shards open" - ); - resetShardLimits(); - // End of Test - 3 - - // Start of Test - 4 - // Test -4 Reset cluster and trigger full restore with same name index in the cluster - // Test -4 Add required nodes for this test after last reset. - addNewNodes(dataNodeCount - 1, 0); - - newClusterUUID = clusterService().state().metadata().clusterUUID(); - assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. 
cluster uuid is same"; - - // Test -4 Step - 2 Create a new index with same name - createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 1)); - ensureYellowAndNoInitializingShards(INDEX_NAME); - ensureGreen(INDEX_NAME); - - future = PlainActionFuture.newFuture(); - - // Test -4 Step - 3 Trigger full cluster restore and validate fails - restoreAndValidateFails( - prevClusterUUID, - future, - IllegalStateException.class, - "cannot restore index [remote-store-test-idx-1] because an open index with same name/uuid already exists in the cluster" - ); + internalCluster().stopAllNodes(); + // Step - 3 Delete index metadata file in remote + try { + Files.move( + segmentRepoPath.resolve( + RemoteClusterStateService.encodeString(clusterName) + "/cluster-state/" + prevClusterUUID + "/index" + ), + segmentRepoPath.resolve("cluster-state/") + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + assertThrows(IllegalStateException.class, () -> addNewNodes(dataNodeCount, clusterManagerNodeCount)); + // Test is complete - // Test -4 Step - 4 validation restore is successful. - ensureGreen(INDEX_NAME); - // End of Test - 4 + // Starting a node without remote state to ensure test cleanup + internalCluster().startNode(Settings.builder().put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false).build()); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") - public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathThrowsException() throws Exception { + public void testRemoteStateFullRestart() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; int dataNodeCount = shardCount * (replicaCount + 1); - int clusterManagerNodeCount = 1; - - // Step - 1 index some data to generate files in remote directory - initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); + int clusterManagerNodeCount = 3; + Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); String prevClusterUUID = clusterService().state().metadata().clusterUUID(); - - // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata - resetCluster(dataNodeCount, clusterManagerNodeCount); - - String newClusterUUID = clusterService().state().metadata().clusterUUID(); - assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; - - // Step - 4 Delete index metadata file in remote + // Delete index metadata file in remote try { Files.move( segmentRepoPath.resolve( RemoteClusterStateService.encodeString(clusterService().state().getClusterName().value()) + "/cluster-state/" + prevClusterUUID - + "/index" + + "/manifest" ), segmentRepoPath.resolve("cluster-state/") ); } catch (IOException e) { throw new RuntimeException(e); } - - // Step - 5 Trigger full cluster restore and validate fails - PlainActionFuture future = PlainActionFuture.newFuture(); - restoreAndValidateFails(prevClusterUUID, future, IllegalStateException.class, "asdsa"); + internalCluster().fullRestart(); + ensureGreen(INDEX_NAME); + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert Objects.equals(newClusterUUID, prevClusterUUID) : "Full restart not successful. 
cluster uuid has changed"; + validateCurrentMetadata(); + verifyRestoredData(indexStats, INDEX_NAME); } - private void reduceShardLimits(int maxShardsPerNode, int maxShardsPerCluster) { - // Step 3 - Reduce shard limits to hit shard limit with less no of shards - try { - client().admin() - .cluster() - .updateSettings( - new ClusterUpdateSettingsRequest().transientSettings( - Settings.builder() - .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode) - .put(SETTING_MAX_SHARDS_PER_CLUSTER_KEY, maxShardsPerCluster) - ) - ) - .get(); - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e); + private void validateMetadata(List<String> indexNames) { + assertEquals(clusterService().state().metadata().indices().size(), indexNames.size()); + for (String indexName : indexNames) { + assertTrue(clusterService().state().metadata().hasIndex(indexName)); } } - private void resetShardLimits() { - // Step - 5 Reset the cluster settings - ClusterUpdateSettingsRequest resetRequest = new ClusterUpdateSettingsRequest(); - resetRequest.transientSettings( - Settings.builder().putNull(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()).putNull(SETTING_MAX_SHARDS_PER_CLUSTER_KEY) + private void validateCurrentMetadata() throws Exception { + RemoteClusterStateService remoteClusterStateService = internalCluster().getInstance( + RemoteClusterStateService.class, + internalCluster().getClusterManagerName() ); - - try { - client().admin().cluster().updateSettings(resetRequest).get(); - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e); - } + assertBusy(() -> { + ClusterMetadataManifest manifest = remoteClusterStateService.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().metadata().clusterUUID() + ).get(); + ClusterState clusterState = getClusterState(); + Metadata currentMetadata = clusterState.metadata(); + assertEquals(currentMetadata.indices().size(), manifest.getIndices().size()); + assertEquals(currentMetadata.coordinationMetadata().term(), manifest.getClusterTerm()); + assertEquals(clusterState.version(), manifest.getStateVersion()); + assertEquals(clusterState.stateUUID(), manifest.getStateUUID()); + assertEquals(currentMetadata.clusterUUIDCommitted(), manifest.isClusterUUIDCommitted()); + for (UploadedIndexMetadata uploadedIndexMetadata : manifest.getIndices()) { + IndexMetadata currentIndexMetadata = currentMetadata.index(uploadedIndexMetadata.getIndexName()); + assertEquals(currentIndexMetadata.getIndex().getUUID(), uploadedIndexMetadata.getIndexUUID()); + } + }); } - } diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 3c7423f73685f..a290f3a1751a0 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -1853,10 +1853,12 @@ public synchronized void stopRandomNodeNotCurrentMaster() throws IOException { */ public void stopAllNodes() { try { - int totalDataNodes = numDataNodes(); - while (totalDataNodes > 0) { - stopRandomDataNode(); - totalDataNodes -= 1; + if (numDataAndClusterManagerNodes() != numClusterManagerNodes()) { + int totalDataNodes = numDataNodes(); + while (totalDataNodes > 0) { + stopRandomDataNode(); + totalDataNodes -= 1; + } } int totalClusterManagerNodes = numClusterManagerNodes(); while (totalClusterManagerNodes > 1) { From
4632f4b17e5bbd6077104d7b68f2bd28336a5922 Mon Sep 17 00:00:00 2001 From: Dhwanil Patel Date: Wed, 18 Oct 2023 17:11:26 +0530 Subject: [PATCH 030/178] Upload global cluster state to remote store (#10404) * Upload global cluster state to remote store Signed-off-by: Dhwanil Patel --- CHANGELOG.md | 5 +- .../remote/ClusterMetadataManifest.java | 128 ++++++++- .../remote/RemoteClusterStateService.java | 201 +++++++++++++- .../blobstore/BlobStoreRepository.java | 8 +- .../blobstore/ChecksumBlobStoreFormat.java | 15 +- .../coordination/CoordinationStateTests.java | 2 + .../remote/ClusterMetadataManifestTests.java | 31 +++ .../RemoteClusterStateServiceTests.java | 260 +++++++++++++++++- .../snapshots/BlobStoreFormatTests.java | 6 +- 9 files changed, 617 insertions(+), 39 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3471564dba15e..243423223ca52 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) - [Admission control] Add enhancements to FS stats to include read/write time, queue size and IO time ([#10541](https://github.com/opensearch-project/OpenSearch/pull/10541)) - [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) -- Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557)) +- [Remote cluster state] Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557)) +- [Remote cluster state] Upload global metadata in cluster state to remote store([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 @@ -121,4 +122,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x \ No newline at end of file +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java index 40b16f3d6323b..97b37d9532f85 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java @@ -33,6 +33,9 @@ */ public class ClusterMetadataManifest implements Writeable, ToXContentFragment { + public static final int CODEC_V0 = 0; // Older codec version, where we haven't introduced codec versions for manifest. + public static final int CODEC_V1 = 1; // In Codec V1 we have introduced global-metadata and codec version in Manifest file. 
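+ // Compatibility note: manifests written before CODEC_V1 carry no codec_version field; they are parsed with the V0 parser defined below and default to CODEC_V0 with a null global metadata file name.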
+ private static final ParseField CLUSTER_TERM_FIELD = new ParseField("cluster_term"); private static final ParseField STATE_VERSION_FIELD = new ParseField("state_version"); private static final ParseField CLUSTER_UUID_FIELD = new ParseField("cluster_uuid"); @@ -40,6 +43,8 @@ public class ClusterMetadataManifest implements Writeable, ToXContentFragment { private static final ParseField OPENSEARCH_VERSION_FIELD = new ParseField("opensearch_version"); private static final ParseField NODE_ID_FIELD = new ParseField("node_id"); private static final ParseField COMMITTED_FIELD = new ParseField("committed"); + private static final ParseField CODEC_VERSION_FIELD = new ParseField("codec_version"); + private static final ParseField GLOBAL_METADATA_FIELD = new ParseField("global_metadata"); private static final ParseField INDICES_FIELD = new ParseField("indices"); private static final ParseField PREVIOUS_CLUSTER_UUID = new ParseField("previous_cluster_uuid"); private static final ParseField CLUSTER_UUID_COMMITTED = new ParseField("cluster_uuid_committed"); @@ -84,7 +89,33 @@ private static boolean clusterUUIDCommitted(Object[] fields) { return (boolean) fields[9]; } - private static final ConstructingObjectParser<ClusterMetadataManifest, Void> PARSER = new ConstructingObjectParser<>( + private static int codecVersion(Object[] fields) { + return (int) fields[10]; + } + + private static String globalMetadataFileName(Object[] fields) { + return (String) fields[11]; + } + + private static final ConstructingObjectParser<ClusterMetadataManifest, Void> PARSER_V0 = new ConstructingObjectParser<>( + "cluster_metadata_manifest", + fields -> new ClusterMetadataManifest( + term(fields), + version(fields), + clusterUUID(fields), + stateUUID(fields), + opensearchVersion(fields), + nodeId(fields), + committed(fields), + CODEC_V0, + null, + indices(fields), + previousClusterUUID(fields), + clusterUUIDCommitted(fields) + ) + ); + + private static final ConstructingObjectParser<ClusterMetadataManifest, Void> PARSER_V1 = new ConstructingObjectParser<>( "cluster_metadata_manifest", fields -> new ClusterMetadataManifest( term(fields), @@ -94,29 +125,45 @@ private static boolean clusterUUIDCommitted(Object[] fields) { opensearchVersion(fields), nodeId(fields), committed(fields), + codecVersion(fields), + globalMetadataFileName(fields), indices(fields), previousClusterUUID(fields), clusterUUIDCommitted(fields) ) ); + private static final ConstructingObjectParser<ClusterMetadataManifest, Void> CURRENT_PARSER = PARSER_V1; + static { - PARSER.declareLong(ConstructingObjectParser.constructorArg(), CLUSTER_TERM_FIELD); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), STATE_VERSION_FIELD); - PARSER.declareString(ConstructingObjectParser.constructorArg(), CLUSTER_UUID_FIELD); - PARSER.declareString(ConstructingObjectParser.constructorArg(), STATE_UUID_FIELD); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), OPENSEARCH_VERSION_FIELD); - PARSER.declareString(ConstructingObjectParser.constructorArg(), NODE_ID_FIELD); - PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), COMMITTED_FIELD); - PARSER.declareObjectArray( + declareParser(PARSER_V0, CODEC_V0); + declareParser(PARSER_V1, CODEC_V1); + } + + private static void declareParser(ConstructingObjectParser<ClusterMetadataManifest, Void> parser, long codec_version) { + parser.declareLong(ConstructingObjectParser.constructorArg(), CLUSTER_TERM_FIELD); + parser.declareLong(ConstructingObjectParser.constructorArg(), STATE_VERSION_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), CLUSTER_UUID_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(),
STATE_UUID_FIELD); + parser.declareInt(ConstructingObjectParser.constructorArg(), OPENSEARCH_VERSION_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), NODE_ID_FIELD); + parser.declareBoolean(ConstructingObjectParser.constructorArg(), COMMITTED_FIELD); + parser.declareObjectArray( ConstructingObjectParser.constructorArg(), (p, c) -> UploadedIndexMetadata.fromXContent(p), INDICES_FIELD ); - PARSER.declareString(ConstructingObjectParser.constructorArg(), PREVIOUS_CLUSTER_UUID); - PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), CLUSTER_UUID_COMMITTED); + parser.declareString(ConstructingObjectParser.constructorArg(), PREVIOUS_CLUSTER_UUID); + parser.declareBoolean(ConstructingObjectParser.constructorArg(), CLUSTER_UUID_COMMITTED); + + if (codec_version >= CODEC_V1) { + parser.declareInt(ConstructingObjectParser.constructorArg(), CODEC_VERSION_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), GLOBAL_METADATA_FIELD); + } } + private final int codecVersion; + private final String globalMetadataFileName; private final List<UploadedIndexMetadata> indices; private final long clusterTerm; private final long stateVersion; @@ -168,6 +215,14 @@ public boolean isClusterUUIDCommitted() { return clusterUUIDCommitted; } + public int getCodecVersion() { + return codecVersion; + } + + public String getGlobalMetadataFileName() { + return globalMetadataFileName; + } + public ClusterMetadataManifest( long clusterTerm, long version, @@ -176,6 +231,8 @@ public ClusterMetadataManifest( Version opensearchVersion, String nodeId, boolean committed, + int codecVersion, + String globalMetadataFileName, List<UploadedIndexMetadata> indices, String previousClusterUUID, boolean clusterUUIDCommitted @@ -187,6 +244,8 @@ public ClusterMetadataManifest( this.opensearchVersion = opensearchVersion; this.nodeId = nodeId; this.committed = committed; + this.codecVersion = codecVersion; + this.globalMetadataFileName = globalMetadataFileName; this.indices = Collections.unmodifiableList(indices); this.previousClusterUUID = previousClusterUUID; this.clusterUUIDCommitted = clusterUUIDCommitted; @@ -203,6 +262,13 @@ public ClusterMetadataManifest(StreamInput in) throws IOException { this.indices = Collections.unmodifiableList(in.readList(UploadedIndexMetadata::new)); this.previousClusterUUID = in.readString(); this.clusterUUIDCommitted = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + this.codecVersion = in.readInt(); + this.globalMetadataFileName = in.readString(); + } else { + this.codecVersion = CODEC_V0; // Default codec + this.globalMetadataFileName = null; + } } public static Builder builder() { @@ -231,6 +297,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endArray(); builder.field(PREVIOUS_CLUSTER_UUID.getPreferredName(), getPreviousClusterUUID()); builder.field(CLUSTER_UUID_COMMITTED.getPreferredName(), isClusterUUIDCommitted()); + if (onOrAfterCodecVersion(CODEC_V1)) { + builder.field(CODEC_VERSION_FIELD.getPreferredName(), getCodecVersion()); + builder.field(GLOBAL_METADATA_FIELD.getPreferredName(), getGlobalMetadataFileName()); + } return builder; } @@ -246,6 +316,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(indices); out.writeString(previousClusterUUID); out.writeBoolean(clusterUUIDCommitted); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeInt(codecVersion); + out.writeString(globalMetadataFileName); + } } @Override @@ -266,12 +340,16 @@ public boolean equals(Object o) { &&
Objects.equals(nodeId, that.nodeId) && Objects.equals(committed, that.committed) && Objects.equals(previousClusterUUID, that.previousClusterUUID) - && Objects.equals(clusterUUIDCommitted, that.clusterUUIDCommitted); + && Objects.equals(clusterUUIDCommitted, that.clusterUUIDCommitted) + && Objects.equals(globalMetadataFileName, that.globalMetadataFileName) + && Objects.equals(codecVersion, that.codecVersion); } @Override public int hashCode() { return Objects.hash( + codecVersion, + globalMetadataFileName, indices, clusterTerm, stateVersion, @@ -290,8 +368,16 @@ public String toString() { return Strings.toString(MediaTypeRegistry.JSON, this); } + public boolean onOrAfterCodecVersion(int codecVersion) { + return this.codecVersion >= codecVersion; + } + + public static ClusterMetadataManifest fromXContentV0(XContentParser parser) throws IOException { + return PARSER_V0.parse(parser, null); + } + public static ClusterMetadataManifest fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); + return CURRENT_PARSER.parse(parser, null); } /** @@ -301,6 +387,8 @@ public static ClusterMetadataManifest fromXContent(XContentParser parser) throws */ public static class Builder { + private String globalMetadataFileName; + private int codecVersion; private List<UploadedIndexMetadata> indices; private long clusterTerm; private long stateVersion; @@ -317,6 +405,16 @@ public Builder indices(List<UploadedIndexMetadata> indices) { return this; } + public Builder codecVersion(int codecVersion) { + this.codecVersion = codecVersion; + return this; + } + + public Builder globalMetadataFileName(String globalMetadataFileName) { + this.globalMetadataFileName = globalMetadataFileName; + return this; + } + public Builder clusterTerm(long clusterTerm) { this.clusterTerm = clusterTerm; return this; @@ -378,6 +476,8 @@ public Builder(ClusterMetadataManifest manifest) { this.opensearchVersion = manifest.opensearchVersion; this.nodeId = manifest.nodeId; this.committed = manifest.committed; + this.globalMetadataFileName = manifest.globalMetadataFileName; + this.codecVersion = manifest.codecVersion; this.indices = new ArrayList<>(manifest.indices); this.previousClusterUUID = manifest.previousClusterUUID; this.clusterUUIDCommitted = manifest.clusterUUIDCommitted; @@ -392,6 +492,8 @@ public ClusterMetadataManifest build() { opensearchVersion, nodeId, committed, + codecVersion, + globalMetadataFileName, indices, previousClusterUUID, clusterUUIDCommitted diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 0cf97de53d5f3..2092c2a0aac3f 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -15,6 +15,7 @@ import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.Nullable; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; @@ -27,6 +28,7 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.ToXContent; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.index.remote.RemoteStoreUtils;
import org.opensearch.index.translog.transfer.BlobStoreTransferService; @@ -55,6 +57,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -80,7 +83,9 @@ public class RemoteClusterStateService implements Closeable { private static final Logger logger = LogManager.getLogger(RemoteClusterStateService.class); + // TODO make these two variables dynamic settings [issue: #10688] public static final int INDEX_METADATA_UPLOAD_WAIT_MILLIS = 20000; + public static final int GLOBAL_METADATA_UPLOAD_WAIT_MILLIS = 20000; public static final ChecksumBlobStoreFormat<IndexMetadata> INDEX_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( "index-metadata", METADATA_NAME_FORMAT, IndexMetadata::fromXContent ); + public static final ChecksumBlobStoreFormat<Metadata> GLOBAL_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( + "metadata", + METADATA_NAME_FORMAT, + Metadata::fromXContent + ); + + /** + * Manifest format compatible with older codec v0, where codec version was missing. + */ + public static final ChecksumBlobStoreFormat<ClusterMetadataManifest> CLUSTER_METADATA_MANIFEST_FORMAT_V0 = + new ChecksumBlobStoreFormat<>("cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContentV0); + + /** + * Manifest format compatible with codec v1, where we introduced codec versions/global metadata. + */ public static final ChecksumBlobStoreFormat<ClusterMetadataManifest> CLUSTER_METADATA_MANIFEST_FORMAT = new ChecksumBlobStoreFormat<>( "cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContent ); + /** * Used to specify if cluster state metadata should be published to remote store */ @@ -105,9 +126,11 @@ public class RemoteClusterStateService implements Closeable { public static final String CLUSTER_STATE_PATH_TOKEN = "cluster-state"; public static final String INDEX_PATH_TOKEN = "index"; + public static final String GLOBAL_METADATA_PATH_TOKEN = "global-metadata"; public static final String MANIFEST_PATH_TOKEN = "manifest"; public static final String MANIFEST_FILE_PREFIX = "manifest"; - public static final String INDEX_METADATA_FILE_PREFIX = "metadata"; + public static final String METADATA_FILE_PREFIX = "metadata"; + public static final int SPLITED_MANIFEST_FILE_LENGTH = 6; // file name manifest__term__version__C/P__timestamp__codecversion private final String nodeId; private final Supplier<RepositoriesService> repositoriesService; private final Settings settings; @@ -121,7 +144,17 @@ public class RemoteClusterStateService implements Closeable { private final AtomicBoolean deleteStaleMetadataRunning = new AtomicBoolean(false); public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 1; - public static final int MANIFEST_CURRENT_CODEC_VERSION = 1; + public static final int MANIFEST_CURRENT_CODEC_VERSION = ClusterMetadataManifest.CODEC_V1; + public static final int GLOBAL_METADATA_CURRENT_CODEC_VERSION = 1; + + // ToXContent Params with gateway mode. + // We are using gateway context mode to persist all custom metadata.
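+ // Each custom metadata section declares the Metadata.XContentContext values it applies to; the GATEWAY context selects everything that must survive a full cluster restart, unlike the narrower API context used for REST responses.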
+ public static final ToXContent.Params FORMAT_PARAMS; static { Map<String, String> params = new HashMap<>(1); + params.put(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_GATEWAY); + FORMAT_PARAMS = new ToXContent.MapParams(params); + } public RemoteClusterStateService( String nodeId, @@ -162,12 +195,22 @@ public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState, Stri return null; } + // TODO: we can upload global metadata and index metadata in parallel. [issue: #10645] + // Write globalMetadata + String globalMetadataFile = writeGlobalMetadata(clusterState); + // any validations before/after upload ? final List<UploadedIndexMetadata> allUploadedIndexMetadata = writeIndexMetadataParallel( clusterState, new ArrayList<>(clusterState.metadata().indices().values()) ); - final ClusterMetadataManifest manifest = uploadManifest(clusterState, allUploadedIndexMetadata, previousClusterUUID, false); + final ClusterMetadataManifest manifest = uploadManifest( + clusterState, + allUploadedIndexMetadata, + previousClusterUUID, + globalMetadataFile, + false + ); final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { logger.warn( @@ -206,6 +249,22 @@ public ClusterMetadataManifest writeIncrementalMetadata( return null; } assert previousClusterState.metadata().coordinationMetadata().term() == clusterState.metadata().coordinationMetadata().term(); + + // Write Global Metadata + final boolean updateGlobalMetadata = Metadata.isGlobalStateEquals( + previousClusterState.metadata(), + clusterState.metadata() + ) == false; + String globalMetadataFile; + // For the migration case from codec V0 to V1, we have added a null check on the global metadata file: + // if the file name is empty and the codec is 1, then write the global metadata. + if (updateGlobalMetadata || previousManifest.getGlobalMetadataFileName() == null) { + globalMetadataFile = writeGlobalMetadata(clusterState); + } else { + globalMetadataFile = previousManifest.getGlobalMetadataFileName(); + } + + // Write Index Metadata final Map<String, Long> previousStateIndexMetadataVersionByName = new HashMap<>(); for (final IndexMetadata indexMetadata : previousClusterState.metadata().indices().values()) { previousStateIndexMetadataVersionByName.put(indexMetadata.getIndex().getName(), indexMetadata.getVersion()); @@ -248,6 +307,7 @@ public ClusterMetadataManifest writeIncrementalMetadata( clusterState, new ArrayList<>(allUploadedIndexMetadata.values()), previousManifest.getPreviousClusterUUID(), + globalMetadataFile, false ); deleteStaleClusterMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID(), RETAINED_MANIFESTS); @@ -273,6 +333,59 @@ public ClusterMetadataManifest writeIncrementalMetadata( return manifest; } + /** + * Uploads the provided ClusterState's global Metadata to the remote store. + * The call is blocking, so the method waits for the upload to finish and then returns. + * + * @param clusterState current ClusterState + * @return String file name where the globalMetadata file is stored.
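+ * The write is awaited for up to GLOBAL_METADATA_UPLOAD_WAIT_MILLIS; a timeout or failure surfaces as a GlobalMetadataTransferException.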
+ */ + private String writeGlobalMetadata(ClusterState clusterState) throws IOException { + + AtomicReference<String> result = new AtomicReference<String>(); + final BlobContainer globalMetadataContainer = globalMetadataContainer( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + final String globalMetadataFilename = globalMetadataFileName(clusterState.metadata()); + + // latch to wait until the upload finishes + CountDownLatch latch = new CountDownLatch(1); + + LatchedActionListener<Void> completionListener = new LatchedActionListener<>(ActionListener.wrap(resp -> { + logger.trace(String.format(Locale.ROOT, "GlobalMetadata uploaded successfully.")); + result.set(globalMetadataContainer.path().buildAsString() + globalMetadataFilename); + }, ex -> { throw new GlobalMetadataTransferException(ex.getMessage(), ex); }), latch); + + GLOBAL_METADATA_FORMAT.writeAsync( + clusterState.metadata(), + globalMetadataContainer, + globalMetadataFilename, + blobStoreRepository.getCompressor(), + completionListener, + FORMAT_PARAMS + ); + + try { + if (latch.await(GLOBAL_METADATA_UPLOAD_WAIT_MILLIS, TimeUnit.MILLISECONDS) == false) { + // TODO: We should add metrics where transfer is timing out. [Issue: #10687] + GlobalMetadataTransferException ex = new GlobalMetadataTransferException( + String.format(Locale.ROOT, "Timed out waiting for transfer of global metadata to complete") + ); + throw ex; + } + } catch (InterruptedException ex) { + GlobalMetadataTransferException exception = new GlobalMetadataTransferException( + String.format(Locale.ROOT, "Timed out waiting for transfer of global metadata to complete"), + ex + ); + Thread.currentThread().interrupt(); + throw exception; + } + + return result.get(); + } + /** + * Uploads the provided IndexMetadata's to the remote store in parallel. The call is blocking, so the method waits for the upload to finish and then returns.
* @@ -381,7 +494,8 @@ private void writeIndexMetadataAsync( indexMetadataContainer, indexMetadataFilename, blobStoreRepository.getCompressor(), - completionListener + completionListener, + FORMAT_PARAMS ); } @@ -398,6 +512,7 @@ public ClusterMetadataManifest markLastStateAsCommitted(ClusterState clusterStat clusterState, previousManifest.getIndices(), previousManifest.getPreviousClusterUUID(), + previousManifest.getGlobalMetadataFileName(), true ); deleteStaleClusterUUIDs(clusterState, committedManifest); @@ -426,6 +541,7 @@ private ClusterMetadataManifest uploadManifest( ClusterState clusterState, List<UploadedIndexMetadata> uploadedIndexMetadata, String previousClusterUUID, + String globalClusterMetadataFileName, boolean committed ) throws IOException { synchronized (this) { @@ -438,6 +554,8 @@ private ClusterMetadataManifest uploadManifest( Version.CURRENT, nodeId, committed, + MANIFEST_CURRENT_CODEC_VERSION, + globalClusterMetadataFileName, uploadedIndexMetadata, previousClusterUUID, clusterState.metadata().clusterUUIDCommitted() @@ -469,6 +587,12 @@ private BlobContainer indexMetadataContainer(String clusterName, String clusterU .blobContainer(getCusterMetadataBasePath(clusterName, clusterUUID).add(INDEX_PATH_TOKEN).add(indexUUID)); } + private BlobContainer globalMetadataContainer(String clusterName, String clusterUUID) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/ + return blobStoreRepository.blobStore() + .blobContainer(getCusterMetadataBasePath(clusterName, clusterUUID).add(GLOBAL_METADATA_PATH_TOKEN)); + } + private BlobContainer manifestContainer(String clusterName, String clusterUUID) { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest return blobStoreRepository.blobStore().blobContainer(getManifestFolderPath(clusterName, clusterUUID)); @@ -510,7 +634,7 @@ static String indexMetadataFileName(IndexMetadata indexMetadata) { // <codec_version> return String.join( DELIMITER, - INDEX_METADATA_FILE_PREFIX, + METADATA_FILE_PREFIX, RemoteStoreUtils.invertLong(indexMetadata.getVersion()), RemoteStoreUtils.invertLong(System.currentTimeMillis()), String.valueOf(INDEX_METADATA_CURRENT_CODEC_VERSION) // Keep the codec version at the last place only; during read we read the last // place to determine the codec version. ); } + private static String globalMetadataFileName(Metadata metadata) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/metadata__<inverted_metadata_version>__<inverted_timestamp>__<codec_version> + return String.join( + DELIMITER, + METADATA_FILE_PREFIX, + RemoteStoreUtils.invertLong(metadata.version()), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(GLOBAL_METADATA_CURRENT_CODEC_VERSION) + ); + } + private BlobPath getManifestFolderPath(String clusterName, String clusterUUID) { return getCusterMetadataBasePath(clusterName, clusterUUID).add(MANIFEST_PATH_TOKEN); } @@ -601,7 +736,8 @@ public String getLastKnownUUIDFromRemote(String clusterName) { return validChain.get(0); } catch (IOException e) { throw new IllegalStateException( -
String.format(Locale.ROOT, "Error while fetching previous UUIDs from remote store for cluster name: %s", clusterName) + String.format(Locale.ROOT, "Error while fetching previous UUIDs from remote store for cluster name: %s", clusterName), + e ); } } @@ -622,7 +758,8 @@ private Map<String, ClusterMetadataManifest> getLatestManifestForAllClusterUUIDs manifest.ifPresent(clusterMetadataManifest -> manifestsByClusterUUID.put(clusterUUID, clusterMetadataManifest)); } catch (Exception e) { throw new IllegalStateException( - String.format(Locale.ROOT, "Exception in fetching manifest for clusterUUID: %s", clusterUUID) + String.format(Locale.ROOT, "Exception in fetching manifest for clusterUUID: %s", clusterUUID), + e ); } } @@ -788,7 +925,7 @@ private Optional<String> getLatestManifestFileName(String clusterName, String cl private ClusterMetadataManifest fetchRemoteClusterMetadataManifest(String clusterName, String clusterUUID, String filename) throws IllegalStateException { try { - return RemoteClusterStateService.CLUSTER_METADATA_MANIFEST_FORMAT.read( + return getClusterMetadataManifestBlobStoreFormat(filename).read( manifestContainer(clusterName, clusterUUID), filename, blobStoreRepository.getNamedXContentRegistry() @@ -798,6 +935,29 @@ private ClusterMetadataManifest fetchRemoteClusterMetadataManifest(String cluste } } + private ChecksumBlobStoreFormat<ClusterMetadataManifest> getClusterMetadataManifestBlobStoreFormat(String fileName) { + long codecVersion = getManifestCodecVersion(fileName); + if (codecVersion == MANIFEST_CURRENT_CODEC_VERSION) { + return CLUSTER_METADATA_MANIFEST_FORMAT; + } else if (codecVersion == ClusterMetadataManifest.CODEC_V0) { + return CLUSTER_METADATA_MANIFEST_FORMAT_V0; + } + + throw new IllegalArgumentException("Cluster metadata manifest file is corrupted, don't have valid codec version"); + } + + private int getManifestCodecVersion(String fileName) { + String[] splitName = fileName.split(DELIMITER); + if (splitName.length == SPLITED_MANIFEST_FILE_LENGTH) { + return Integer.parseInt(splitName[splitName.length - 1]); // Last value would be codec version. + } else if (splitName.length < SPLITED_MANIFEST_FILE_LENGTH) { // Where codec is not part of file name, i.e. default codec version 0 // is used. + return ClusterMetadataManifest.CODEC_V0; + } else { + throw new IllegalArgumentException("Manifest file name is corrupted"); + } + } + public static String encodeString(String content) { return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8)); } @@ -816,6 +976,20 @@ public IndexMetadataTransferException(String errorDesc, Throwable cause) { } } + /** + * Exception for GlobalMetadata transfer failures to remote + */ + static class GlobalMetadataTransferException extends RuntimeException { + + public GlobalMetadataTransferException(String errorDesc) { + super(errorDesc); + } + + public GlobalMetadataTransferException(String errorDesc, Throwable cause) { + super(errorDesc, cause); + } + } + /** * Purges all remote cluster state against provided cluster UUIDs * @@ -907,6 +1081,7 @@ private void deleteClusterMetadata( Set<String> filesToKeep = new HashSet<>(); Set<String> staleManifestPaths = new HashSet<>(); Set<String> staleIndexMetadataPaths = new HashSet<>(); + Set<String> staleGlobalMetadataPaths = new HashSet<>(); activeManifestBlobMetadata.forEach(blobMetadata -> { ClusterMetadataManifest clusterMetadataManifest = fetchRemoteClusterMetadataManifest( clusterName, clusterUUID, blobMetadata.name() ); clusterMetadataManifest.getIndices() .forEach(uploadedIndexMetadata -> filesToKeep.add(uploadedIndexMetadata.getUploadedFilename())); + filesToKeep.add(clusterMetadataManifest.getGlobalMetadataFileName()); }); staleManifestBlobMetadata.forEach(blobMetadata -> { ClusterMetadataManifest clusterMetadataManifest = fetchRemoteClusterMetadataManifest( clusterName, clusterUUID, blobMetadata.name() ); staleManifestPaths.add(new BlobPath().add(MANIFEST_PATH_TOKEN).buildAsString() + blobMetadata.name()); + if
(filesToKeep.contains(clusterMetadataManifest.getGlobalMetadataFileName()) == false) { + String[] globalMetadataSplitPath = clusterMetadataManifest.getGlobalMetadataFileName().split("/"); + staleGlobalMetadataPaths.add( + new BlobPath().add(GLOBAL_METADATA_PATH_TOKEN).buildAsString() + GLOBAL_METADATA_FORMAT.blobName( + globalMetadataSplitPath[globalMetadataSplitPath.length - 1] + ) + ); + } clusterMetadataManifest.getIndices().forEach(uploadedIndexMetadata -> { if (filesToKeep.contains(uploadedIndexMetadata.getUploadedFilename()) == false) { staleIndexMetadataPaths.add( @@ -938,6 +1122,7 @@ private void deleteClusterMetadata( return; } + deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleGlobalMetadataPaths)); deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleIndexMetadataPaths)); deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleManifestPaths)); } catch (IllegalStateException e) { diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 69883e0d19c8d..c48efa9d3d2ee 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -173,6 +173,7 @@ import java.util.stream.Stream; import static org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName; +import static org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS; /** * BlobStore - based implementation of Snapshot Repository @@ -3333,7 +3334,12 @@ private void writeShardIndexBlobAtomic( () -> new ParameterizedMessage("[{}] Writing shard index [{}] to [{}]", metadata.name(), indexGeneration, shardContainer.path()) ); final String blobName = INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(String.valueOf(indexGeneration)); - writeAtomic(shardContainer, blobName, INDEX_SHARD_SNAPSHOTS_FORMAT.serialize(updatedSnapshots, blobName, compressor), true); + writeAtomic( + shardContainer, + blobName, + INDEX_SHARD_SNAPSHOTS_FORMAT.serialize(updatedSnapshots, blobName, compressor, SNAPSHOT_ONLY_FORMAT_PARAMS), + true + ); } // Unused blobs are all previous index-, data- and meta-blobs and that are not referenced by the new index- as well as all diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java index 7e1960171043a..17cb68f798094 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -83,7 +83,7 @@ public final class ChecksumBlobStoreFormat { // Serialization parameters to specify correct context for metadata serialization - private static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; + public static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; static { Map snapshotOnlyParams = new HashMap<>(); @@ -171,7 +171,7 @@ public T deserialize(String blobName, NamedXContentRegistry namedXContentRegistr */ public void write(final T obj, final BlobContainer blobContainer, final String name, final Compressor compressor) throws IOException { final String blobName = blobName(name); - final BytesReference bytes = serialize(obj, blobName, compressor); + final BytesReference bytes = serialize(obj, blobName, compressor, 
SNAPSHOT_ONLY_FORMAT_PARAMS); blobContainer.writeBlob(blobName, bytes.streamInput(), bytes.length(), false); } @@ -184,13 +184,15 @@ public void write(final T obj, final BlobContainer blobContainer, final String n * @param name blob name * @param compressor whether to use compression * @param listener listener to listen to write result + * @param params ToXContent params */ public void writeAsync( final T obj, final BlobContainer blobContainer, final String name, final Compressor compressor, - ActionListener listener + ActionListener listener, + final ToXContent.Params params ) throws IOException { if (blobContainer instanceof AsyncMultiStreamBlobContainer == false) { write(obj, blobContainer, name, compressor); @@ -198,7 +200,7 @@ public void writeAsync( return; } final String blobName = blobName(name); - final BytesReference bytes = serialize(obj, blobName, compressor); + final BytesReference bytes = serialize(obj, blobName, compressor, params); final String resourceDescription = "ChecksumBlobStoreFormat.writeAsync(blob=\"" + blobName + "\")"; try (IndexInput input = new ByteArrayIndexInput(resourceDescription, BytesReference.toBytes(bytes))) { long expectedChecksum; @@ -230,7 +232,8 @@ public void writeAsync( } } - public BytesReference serialize(final T obj, final String blobName, final Compressor compressor) throws IOException { + public BytesReference serialize(final T obj, final String blobName, final Compressor compressor, final ToXContent.Params params) + throws IOException { try (BytesStreamOutput outputStream = new BytesStreamOutput()) { try ( OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput( @@ -254,7 +257,7 @@ public void close() throws IOException { ) ) { builder.startObject(); - obj.toXContent(builder, SNAPSHOT_ONLY_FORMAT_PARAMS); + obj.toXContent(builder, params); builder.endObject(); } CodecUtil.writeFooter(indexOutput); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index f37823d2c0c7d..1c0dc7fc1ca2d 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -938,6 +938,8 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep Version.CURRENT, randomAlphaOfLength(10), false, + 1, + randomAlphaOfLength(10), Collections.emptyList(), randomAlphaOfLength(10), true diff --git a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java index 66426c2a880a3..6c9a3201656d7 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java @@ -26,6 +26,33 @@ public class ClusterMetadataManifestTests extends OpenSearchTestCase { + public void testClusterMetadataManifestXContentV0() throws IOException { + UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); + ClusterMetadataManifest originalManifest = new ClusterMetadataManifest( + 1L, + 1L, + "test-cluster-uuid", + "test-state-uuid", + Version.CURRENT, + "test-node-id", + false, + ClusterMetadataManifest.CODEC_V0, + null, + Collections.singletonList(uploadedIndexMetadata), + "prev-cluster-uuid", + true + ); + final 
XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + originalManifest.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + final ClusterMetadataManifest fromXContentManifest = ClusterMetadataManifest.fromXContentV0(parser); + assertEquals(originalManifest, fromXContentManifest); + } + } + public void testClusterMetadataManifestXContent() throws IOException { UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); ClusterMetadataManifest originalManifest = new ClusterMetadataManifest( @@ -36,6 +63,8 @@ public void testClusterMetadataManifestXContent() throws IOException { Version.CURRENT, "test-node-id", false, + ClusterMetadataManifest.CODEC_V1, + "test-global-metadata-file", Collections.singletonList(uploadedIndexMetadata), "prev-cluster-uuid", true @@ -60,6 +89,8 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { Version.CURRENT, "B10RX1f5RJenMQvYccCgSQ", true, + 1, + "test-global-metadata-file", randomUploadedIndexMetadataList(), "yfObdx8KSMKKrXf8UyHhM", true diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 119d19cc34981..ddee3bcf10382 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -66,10 +66,11 @@ import org.mockito.ArgumentMatchers; import static org.opensearch.gateway.remote.RemoteClusterStateService.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateService.FORMAT_PARAMS; import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_METADATA_CURRENT_CODEC_VERSION; -import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_METADATA_FILE_PREFIX; import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_CURRENT_CODEC_VERSION; import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_FILE_PREFIX; +import static org.opensearch.gateway.remote.RemoteClusterStateService.METADATA_FILE_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; @@ -232,14 +233,15 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { assertThat(manifest.getIndices().get(0).getIndexName(), is(uploadedIndexMetadata.getIndexName())); assertThat(manifest.getIndices().get(0).getIndexUUID(), is(uploadedIndexMetadata.getIndexUUID())); assertThat(manifest.getIndices().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getGlobalMetadataFileName(), notNullValue()); assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); assertThat(manifest.getPreviousClusterUUID(), 
is(expectedManifest.getPreviousClusterUUID())); - assertEquals(actionListenerArgumentCaptor.getAllValues().size(), 1); - assertEquals(writeContextArgumentCaptor.getAllValues().size(), 1); + assertEquals(actionListenerArgumentCaptor.getAllValues().size(), 2); + assertEquals(writeContextArgumentCaptor.getAllValues().size(), 2); WriteContext capturedWriteContext = writeContextArgumentCaptor.getValue(); byte[] writtenBytes = capturedWriteContext.getStreamProvider(Integer.MAX_VALUE).provideStream(0).getInputStream().readAllBytes(); @@ -263,7 +265,7 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { } - public void testWriteFullMetadataInParallelFailure() throws IOException { + public void testWriteFullMetadataFailureForGlobalMetadata() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); @@ -274,6 +276,27 @@ public void testWriteFullMetadataInParallelFailure() throws IOException { return null; }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); + remoteClusterStateService.start(); + assertThrows( + RemoteClusterStateService.GlobalMetadataTransferException.class, + () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) + ); + } + + public void testWriteFullMetadataInParallelFailureForIndexMetadata() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); + + ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + + doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; + }).doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onFailure(new RuntimeException("Cannot upload to remote")); + return null; + }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); + remoteClusterStateService.start(); assertThrows( RemoteClusterStateService.IndexMetadataTransferException.class, @@ -338,6 +361,207 @@ public void testWriteIncrementalMetadataSuccess() throws IOException { assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); } + /* + * Here we will verify the migration of manifest file from codec V0 and V1. + * + * Initially codec version is 0 and global metadata is also null, we will perform index metadata update. 
+ * In final manifest codec version should be 1 and + * global metadata should be updated, even if it was not changed in this cluster state update + */ + public void testMigrationFromCodecV0ManifestToCodecV1Manifest() throws IOException { + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState previousClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .nodes(nodesWithLocalNodeClusterManager()) + .build(); + + // Update only index metadata + final IndexMetadata indexMetadata = new IndexMetadata.Builder("test").settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, "uuid") + .build() + ).numberOfShards(1).numberOfReplicas(0).build(); + Metadata newMetadata = Metadata.builder(previousClusterState.metadata()).put(indexMetadata, true).build(); + ClusterState newClusterState = ClusterState.builder(previousClusterState).metadata(newMetadata).build(); + + // previous manifest with codec 0 and null global metadata + final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder() + .codecVersion(ClusterMetadataManifest.CODEC_V0) + .globalMetadataFileName(null) + .indices(Collections.emptyList()) + .build(); + + remoteClusterStateService.start(); + final ClusterMetadataManifest manifestAfterUpdate = remoteClusterStateService.writeIncrementalMetadata( + previousClusterState, + newClusterState, + previousManifest + ); + + // global metadata is updated + assertThat(manifestAfterUpdate.getGlobalMetadataFileName(), notNullValue()); + // Manifest file with codec version with 1 is updated. + assertThat(manifestAfterUpdate.getCodecVersion(), is(ClusterMetadataManifest.CODEC_V1)); + } + + public void testWriteIncrementalGlobalMetadataSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState previousClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .build(); + + final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder() + .codecVersion(2) + .globalMetadataFileName("global-metadata-file") + .indices(Collections.emptyList()) + .build(); + + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata( + previousClusterState, + clusterState, + previousManifest + ); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(Collections.emptyList()) + .globalMetadataFileName("mock-filename") + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") + .build(); + + assertThat(manifest.getGlobalMetadataFileName(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + } + + /* + * Here we will verify index metadata is not uploaded 
again if change is only in global metadata + */ + public void testGlobalMetadataOnlyUpdated() throws IOException { + // setup + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState initialClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .build(); + final ClusterMetadataManifest initialManifest = ClusterMetadataManifest.builder() + .codecVersion(2) + .globalMetadataFileName("global-metadata-file") + .indices(Collections.emptyList()) + .build(); + remoteClusterStateService.start(); + + // Initial cluster state with index. + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + // Updating remote cluster state with changing index metadata + final ClusterMetadataManifest manifestAfterIndexMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + initialClusterState, + clusterState, + initialManifest + ); + + // new cluster state where only global metadata is different + Metadata newMetadata = Metadata.builder(clusterState.metadata()) + .persistentSettings(Settings.builder().put("cluster.blocks.read_only", true).build()) + .build(); + ClusterState newClusterState = ClusterState.builder(clusterState).metadata(newMetadata).build(); + + // updating remote cluster state with global metadata + final ClusterMetadataManifest manifestAfterGlobalMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + clusterState, + newClusterState, + manifestAfterIndexMetadataUpdate + ); + + // Verify that index metadata information is same in manifest files + assertThat(manifestAfterIndexMetadataUpdate.getIndices().size(), is(manifestAfterGlobalMetadataUpdate.getIndices().size())); + assertThat( + manifestAfterIndexMetadataUpdate.getIndices().get(0).getIndexName(), + is(manifestAfterGlobalMetadataUpdate.getIndices().get(0).getIndexName()) + ); + assertThat( + manifestAfterIndexMetadataUpdate.getIndices().get(0).getIndexUUID(), + is(manifestAfterGlobalMetadataUpdate.getIndices().get(0).getIndexUUID()) + ); + + // since timestamp is part of file name, if file name is same we can confirm that file is not update in global metadata update + assertThat( + manifestAfterIndexMetadataUpdate.getIndices().get(0).getUploadedFilename(), + is(manifestAfterGlobalMetadataUpdate.getIndices().get(0).getUploadedFilename()) + ); + + // global metadata file would have changed + assertFalse( + manifestAfterIndexMetadataUpdate.getGlobalMetadataFileName() + .equalsIgnoreCase(manifestAfterGlobalMetadataUpdate.getGlobalMetadataFileName()) + ); + } + + /* + * Here we will verify global metadata is not uploaded again if change is only in index metadata + */ + public void testIndexMetadataOnlyUpdated() throws IOException { + // setup + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState initialClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .build(); + final ClusterMetadataManifest initialManifest = ClusterMetadataManifest.builder() + .codecVersion(2) + .indices(Collections.emptyList()) + .build(); + remoteClusterStateService.start(); + + // Initial cluster state with global metadata. 
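
A note on the assertion strategy used throughout these tests: because every metadata blob name embeds the state version and the upload timestamp, an unchanged blob keeps a byte-identical name across manifests, so comparing names doubles as a cheap re-upload detector. A minimal stand-alone sketch of that idea (the helper name is ours, not part of the service):

    import java.util.Objects;

    // Illustrative only: blob names embed version + timestamp, so two manifests
    // that reference the same physical file must carry the identical name.
    final class ReUploadCheck {
        static boolean wasReUploaded(String previousBlobName, String currentBlobName) {
            return !Objects.equals(previousBlobName, currentBlobName);
        }
    }
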
+ final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + + // Updating remote cluster state with changing global metadata + final ClusterMetadataManifest manifestAfterGlobalMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + initialClusterState, + clusterState, + initialManifest + ); + + // new cluster state where only Index metadata is different + final IndexMetadata indexMetadata = new IndexMetadata.Builder("test").settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, "uuid") + .build() + ).numberOfShards(1).numberOfReplicas(0).build(); + Metadata newMetadata = Metadata.builder(clusterState.metadata()).put(indexMetadata, true).build(); + ClusterState newClusterState = ClusterState.builder(clusterState).metadata(newMetadata).build(); + + // updating remote cluster state with index metadata + final ClusterMetadataManifest manifestAfterIndexMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + clusterState, + newClusterState, + manifestAfterGlobalMetadataUpdate + ); + + // Verify that global metadata information is same in manifest files after updating index Metadata + // since timestamp is part of file name, if file name is same we can confirm that file is not update in index metadata update + assertThat( + manifestAfterIndexMetadataUpdate.getGlobalMetadataFileName(), + is(manifestAfterGlobalMetadataUpdate.getGlobalMetadataFileName()) + ); + + // Index metadata would have changed + assertThat(manifestAfterGlobalMetadataUpdate.getIndices().size(), is(0)); + assertThat(manifestAfterIndexMetadataUpdate.getIndices().size(), is(1)); + } + public void testReadLatestMetadataManifestFailedIOException() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); @@ -402,6 +626,7 @@ public void testReadLatestMetadataManifestSuccessButNoIndexMetadata() throws IOE .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) .previousClusterUUID("prev-cluster-uuid") + .codecVersion(ClusterMetadataManifest.CODEC_V0) .build(); BlobContainer blobContainer = mockBlobStoreObjects(); @@ -458,6 +683,7 @@ public void testReadLatestMetadataManifestSuccess() throws IOException { .clusterUUID("cluster-uuid") .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .codecVersion(ClusterMetadataManifest.CODEC_V0) .previousClusterUUID("prev-cluster-uuid") .build(); @@ -504,6 +730,7 @@ public void testReadLatestIndexMetadataSuccess() throws IOException { .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) .previousClusterUUID("prev-cluster-uuid") + .codecVersion(ClusterMetadataManifest.CODEC_V0) .build(); mockBlobContainer(mockBlobStoreObjects(), expectedManifest, Map.of(index.getUUID(), indexMetadata)); @@ -691,7 +918,7 @@ public void testFileNames() { String indexMetadataFileName = RemoteClusterStateService.indexMetadataFileName(indexMetadata); String[] splittedIndexMetadataFileName = indexMetadataFileName.split(DELIMITER); assertThat(indexMetadataFileName.split(DELIMITER).length, is(4)); - assertThat(splittedIndexMetadataFileName[0], is(INDEX_METADATA_FILE_PREFIX)); + assertThat(splittedIndexMetadataFileName[0], is(METADATA_FILE_PREFIX)); assertThat(splittedIndexMetadataFileName[1], is(RemoteStoreUtils.invertLong(indexMetadata.getVersion()))); 
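
The inversion these assertions exercise is what makes "latest first" listings cheap: encoding Long.MAX_VALUE minus the value as a zero-padded string means lexicographically ascending blob names correspond to numerically descending versions and timestamps. A self-contained sketch of one such encoding follows; it mirrors the idea, not necessarily the exact RemoteStoreUtils implementation:

    import java.util.Locale;

    public class InvertedLongDemo {
        // Zero-pad to 19 digits so every encoded value has equal length and
        // plain string comparison behaves like numeric comparison.
        static String invertLong(long value) {
            if (value < 0) {
                throw new IllegalArgumentException("value must be non-negative");
            }
            return String.format(Locale.ROOT, "%019d", Long.MAX_VALUE - value);
        }

        public static void main(String[] args) {
            // A newer version encodes to a lexicographically smaller string, so a
            // sorted listing of metadata__<inverted-version>__... names surfaces
            // the most recent file first (the assert needs -ea to fire).
            assert invertLong(7L).compareTo(invertLong(5L)) < 0;
            System.out.println(invertLong(5L)); // 9223372036854775802
            System.out.println(invertLong(7L)); // 9223372036854775800
        }
    }
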
assertThat(splittedIndexMetadataFileName[3], is(String.valueOf(INDEX_METADATA_CURRENT_CODEC_VERSION))); @@ -820,6 +1047,7 @@ private ClusterMetadataManifest generateClusterMetadataManifest( .previousClusterUUID(previousClusterUUID) .committed(true) .clusterUUIDCommitted(true) + .globalMetadataFileName("test-global-metadata") .build(); } @@ -859,7 +1087,8 @@ private void mockBlobContainer( BytesReference bytes = RemoteClusterStateService.CLUSTER_METADATA_MANIFEST_FORMAT.serialize( clusterMetadataManifest, "manifestFileName", - blobStoreRepository.getCompressor() + blobStoreRepository.getCompressor(), + FORMAT_PARAMS ); when(blobContainer.readBlob("manifestFileName")).thenReturn(new ByteArrayInputStream(bytes.streamInput().readAllBytes())); @@ -873,7 +1102,8 @@ private void mockBlobContainer( BytesReference bytesIndexMetadata = RemoteClusterStateService.INDEX_METADATA_FORMAT.serialize( indexMetadata, fileName, - blobStoreRepository.getCompressor() + blobStoreRepository.getCompressor(), + FORMAT_PARAMS ); when(blobContainer.readBlob(fileName + ".dat")).thenReturn( new ByteArrayInputStream(bytesIndexMetadata.streamInput().readAllBytes()) @@ -884,6 +1114,22 @@ private void mockBlobContainer( }); } + private static ClusterState.Builder generateClusterStateWithGlobalMetadata() { + final Settings clusterSettings = Settings.builder().put("cluster.blocks.read_only", true).build(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + + return ClusterState.builder(ClusterName.DEFAULT) + .version(1L) + .stateUUID("state-uuid") + .metadata( + Metadata.builder() + .persistentSettings(clusterSettings) + .clusterUUID("cluster-uuid") + .coordinationMetadata(coordinationMetadata) + .build() + ); + } + private static ClusterState.Builder generateClusterStateWithOneIndex() { final Index index = new Index("test-index", "index-uuid"); final Settings idxSettings = Settings.builder() diff --git a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java index 03f0d27188027..c114b56bd0b39 100644 --- a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java +++ b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java @@ -152,14 +152,16 @@ public void onFailure(Exception e) { mockBlobContainer, "check-smile", CompressorRegistry.none(), - actionListener + actionListener, + ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS ); checksumSMILE.writeAsync( new BlobObj("checksum smile compressed"), mockBlobContainer, "check-smile-comp", CompressorRegistry.getCompressor(DeflateCompressor.NAME), - actionListener + actionListener, + ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS ); latch.await(); From e942483e716255b8f801dc1242f909d73019aa70 Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Wed, 18 Oct 2023 04:45:49 -0700 Subject: [PATCH 031/178] [Segment Replication] Unmute Test SegmentReplicationTargetServiceTests.testShardAlreadyReplicating (#10660) * Remove Awaits fix. Signed-off-by: Rishikesh1159 * apply spotlesscheck. Signed-off-by: Rishikesh1159 * add test logging. 
Signed-off-by: Rishikesh1159 --------- Signed-off-by: Rishikesh1159 --- .../replication/SegmentReplicationTargetServiceTests.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index c108de5ee5ea6..d16a188f405e3 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -39,6 +39,7 @@ import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -246,7 +247,7 @@ public void testAlreadyOnNewCheckpoint() { verify(spy, times(0)).startReplication(any(), any(), any()); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8928") + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testShardAlreadyReplicating() { CountDownLatch blockGetCheckpointMetadata = new CountDownLatch(1); SegmentReplicationSource source = new TestReplicationSource() { From 75bd9f2d341efbe0a795c267af923eb48bffbd91 Mon Sep 17 00:00:00 2001 From: Vikas Bansal <43470111+vikasvb90@users.noreply.github.com> Date: Wed, 18 Oct 2023 18:14:55 +0530 Subject: [PATCH 032/178] Added BufferedInputStream to allow mark and reset ops during IO errors (#10690) Signed-off-by: vikasvb90 --- .../repositories/s3/S3RepositoryPlugin.java | 14 ++++++++++---- .../repositories/s3/async/AsyncPartsHandler.java | 6 +++++- .../s3/async/AsyncTransferManager.java | 5 ++++- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java index a80ee0ca35fae..c6450e49d08e2 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -55,6 +56,7 @@ import org.opensearch.script.ScriptService; import org.opensearch.threadpool.ExecutorBuilder; import org.opensearch.threadpool.FixedExecutorBuilder; +import org.opensearch.threadpool.ScalingExecutorBuilder; import org.opensearch.threadpool.ThreadPool; import org.opensearch.watcher.ResourceWatcherService; @@ -93,17 +95,21 @@ public S3RepositoryPlugin(final Settings settings, final Path configPath) { @Override public List> getExecutorBuilders(Settings settings) { List> executorBuilders = new ArrayList<>(); + int halfProcMaxAt5 = 
halfAllocatedProcessorsMaxFive(allocatedProcessors(settings)); executorBuilders.add( new FixedExecutorBuilder(settings, PRIORITY_FUTURE_COMPLETION, priorityPoolCount(settings), 10_000, PRIORITY_FUTURE_COMPLETION) ); - executorBuilders.add( - new FixedExecutorBuilder(settings, PRIORITY_STREAM_READER, priorityPoolCount(settings), 10_000, PRIORITY_STREAM_READER) - ); + executorBuilders.add(new ScalingExecutorBuilder(PRIORITY_STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); + executorBuilders.add(new FixedExecutorBuilder(settings, FUTURE_COMPLETION, normalPoolCount(settings), 10_000, FUTURE_COMPLETION)); - executorBuilders.add(new FixedExecutorBuilder(settings, STREAM_READER, normalPoolCount(settings), 10_000, STREAM_READER)); + executorBuilders.add(new ScalingExecutorBuilder(STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); return executorBuilders; } + static int halfAllocatedProcessorsMaxFive(final int allocatedProcessors) { + return boundedBy((allocatedProcessors + 1) / 2, 1, 5); + } + S3RepositoryPlugin(final Settings settings, final Path configPath, final S3Service service, final S3AsyncService s3AsyncService) { this.service = Objects.requireNonNull(service, "S3 service must not be null"); this.configPath = configPath; diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java index ad6939ce299d6..86bb70e5a40a2 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java @@ -23,9 +23,11 @@ import org.opensearch.common.StreamContext; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.s3.SocketAccess; import org.opensearch.repositories.s3.io.CheckedContainer; +import java.io.BufferedInputStream; import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -142,7 +144,9 @@ private static void uploadPart( () -> s3AsyncClient.uploadPart( uploadPartRequest, AsyncRequestBody.fromInputStream( - inputStreamContainer.getInputStream(), + // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered + // data can be retried instead of retrying whole file by the application. 
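
To make the mark/reset rationale concrete, here is a tiny stand-alone demonstration (illustrative only; the 1 MB + 1 buffer sizing in the change below bounds how much a retry can rewind):

    import java.io.BufferedInputStream;
    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;

    public class MarkResetDemo {
        public static void main(String[] args) throws IOException {
            byte[] part = "part-payload".getBytes(StandardCharsets.UTF_8);
            // Wrapping grants mark/reset support up to the buffer size, so a failed
            // attempt can rewind and resend only the buffered bytes.
            InputStream in = new BufferedInputStream(new ByteArrayInputStream(part), part.length + 1);

            in.mark(part.length + 1); // remember the part's start
            in.readNBytes(4);         // simulate a partially consumed, failed upload
            in.reset();               // rewind to the mark for the retry

            System.out.println(new String(in.readAllBytes(), StandardCharsets.UTF_8)); // part-payload
        }
    }
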
+ new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)), inputStreamContainer.getContentLength(), streamReadExecutor ) diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java index 8d45c2167a3d1..fb1e409388a69 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java @@ -37,6 +37,7 @@ import org.opensearch.repositories.s3.SocketAccess; import org.opensearch.repositories.s3.io.CheckedContainer; +import java.io.BufferedInputStream; import java.io.IOException; import java.util.Arrays; import java.util.Base64; @@ -303,7 +304,9 @@ private void uploadInOneChunk( () -> s3AsyncClient.putObject( putObjectRequestBuilder.build(), AsyncRequestBody.fromInputStream( - inputStreamContainer.getInputStream(), + // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered + // data can be retried instead of retrying whole file by the application. + new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)), inputStreamContainer.getContentLength(), streamReadExecutor ) From 1715da57f5c6223bf590118bf5adad8a7afaf9da Mon Sep 17 00:00:00 2001 From: Bukhtawar Khan Date: Wed, 18 Oct 2023 18:23:24 +0530 Subject: [PATCH 033/178] Repository stats for remote store (#10567) Signed-off-by: Bukhtawar Khan --- CHANGELOG.md | 1 + .../s3/S3BlobStoreRepositoryTests.java | 75 ++++++++ .../repositories/s3/S3BlobContainer.java | 5 +- .../repositories/s3/S3BlobStore.java | 12 ++ .../repositories/s3/StatsMetricPublisher.java | 182 +++++++++++++----- .../s3/async/AsyncTransferManager.java | 24 ++- .../s3/async/AsyncTransferManagerTests.java | 13 +- .../admin/cluster/node/stats/NodeStats.java | 24 ++- .../cluster/node/stats/NodesStatsRequest.java | 3 +- .../node/stats/TransportNodesStatsAction.java | 3 +- .../stats/TransportClusterStatsAction.java | 1 + .../common/blobstore/BlobStore.java | 31 +++ .../common/blobstore/EncryptedBlobStore.java | 10 + .../main/java/org/opensearch/node/Node.java | 3 +- .../java/org/opensearch/node/NodeService.java | 12 +- .../repositories/RepositoriesService.java | 19 +- .../repositories/RepositoriesStats.java | 52 +++++ .../RepositoriesStatsArchive.java | 14 -- .../repositories/RepositoryInfo.java | 54 +----- .../repositories/RepositoryStats.java | 69 ++++++- .../repositories/RepositoryStatsSnapshot.java | 22 +-- .../blobstore/BlobStoreRepository.java | 2 + .../blobstore/MeteredBlobStoreRepository.java | 18 +- .../cluster/node/stats/NodeStatsTests.java | 1 + .../opensearch/cluster/DiskUsageTests.java | 6 + .../RepositoriesServiceTests.java | 13 +- .../RepositoriesStatsArchiveTests.java | 8 +- .../MockInternalClusterInfoService.java | 3 +- ...chMockAPIBasedRepositoryIntegTestCase.java | 2 +- .../opensearch/test/InternalTestCluster.java | 1 + 30 files changed, 479 insertions(+), 204 deletions(-) create mode 100644 server/src/main/java/org/opensearch/repositories/RepositoriesStats.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 243423223ca52..0742215a2dfb9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -88,6 +88,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added - Per request phase latency 
([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) +- [Remote Store] Add repository stats for remote store([#10567](https://github.com/opensearch-project/OpenSearch/pull/10567)) ### Dependencies - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index 5f88ad7867513..1361f3165b653 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -37,6 +37,8 @@ import software.amazon.awssdk.core.internal.http.pipeline.stages.ApplyTransactionIdStage; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SuppressForbidden; @@ -51,10 +53,15 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.RepositoryStats; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; import org.opensearch.repositories.s3.utils.AwsRequestSigner; import org.opensearch.snapshots.mockstore.BlobStoreWrapper; +import org.opensearch.test.BackgroundIndexer; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; @@ -63,12 +70,18 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.stream.StreamSupport; import fixture.s3.S3HttpHandler; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") // Need to set up a new cluster for each test because cluster settings use randomized authentication settings @@ -152,6 +165,66 @@ protected Settings nodeSettings(int nodeOrdinal) { return builder.build(); } + @Override + public void testRequestStats() throws Exception { + final String repository = createRepository(randomName()); + final String index = "index-no-merges"; + createIndex( + index, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + + final long nbDocs = randomLongBetween(10_000L, 20_000L); + try (BackgroundIndexer indexer = new BackgroundIndexer(index, "_doc", client(), (int) nbDocs)) { + waitForDocs(nbDocs, indexer); + } + + 
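
An aside before the snapshot round-trip: the per-node aggregation this test performs further down reduces to folding counter maps together with Map.merge, summing counts for matching request names and failing fast on long overflow. A stand-alone illustration (the map contents are invented):

    import java.util.HashMap;
    import java.util.Map;

    public class StatsMergeDemo {
        public static void main(String[] args) {
            Map<String, Long> nodeA = Map.of("GetObject", 3L, "PutObject", 1L);
            Map<String, Long> nodeB = Map.of("GetObject", 2L, "DeleteObjects", 4L);

            Map<String, Long> aggregated = new HashMap<>(nodeA);
            // Math::addExact throws ArithmeticException instead of silently wrapping.
            nodeB.forEach((request, count) -> aggregated.merge(request, count, Math::addExact));

            // Totals: GetObject=5, PutObject=1, DeleteObjects=4 (HashMap order unspecified).
            System.out.println(aggregated);
        }
    }
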
flushAndRefresh(index); + ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); + assertThat(forceMerge.getSuccessfulShards(), equalTo(1)); + assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs); + + final String snapshot = "snapshot"; + assertSuccessfulSnapshot( + client().admin().cluster().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index) + ); + + assertAcked(client().admin().indices().prepareDelete(index)); + + assertSuccessfulRestore(client().admin().cluster().prepareRestoreSnapshot(repository, snapshot).setWaitForCompletion(true)); + ensureGreen(index); + assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs); + + assertAcked(client().admin().cluster().prepareDeleteSnapshot(repository, snapshot).get()); + + final RepositoryStats repositoryStats = StreamSupport.stream( + internalCluster().getInstances(RepositoriesService.class).spliterator(), + false + ).map(repositoriesService -> { + try { + return repositoriesService.repository(repository); + } catch (RepositoryMissingException e) { + return null; + } + }).filter(Objects::nonNull).map(Repository::stats).reduce(RepositoryStats::merge).get(); + + Map> extendedStats = repositoryStats.extendedStats; + Map aggregatedStats = new HashMap<>(); + extendedStats.forEach((k, v) -> { + if (k == BlobStore.Metric.RETRY_COUNT || k == BlobStore.Metric.REQUEST_SUCCESS || k == BlobStore.Metric.REQUEST_FAILURE) { + for (Map.Entry entry : v.entrySet()) { + aggregatedStats.merge(entry.getKey(), entry.getValue(), Math::addExact); + } + } + + }); + final Map mockCalls = getMockRequestCounts(); + + String assertionErrorMsg = String.format("SDK sent [%s] calls and handler measured [%s] calls", aggregatedStats, mockCalls); + + assertEquals(assertionErrorMsg, mockCalls, aggregatedStats); + } + /** * S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload. */ @@ -263,6 +336,8 @@ public void maybeTrack(final String request, Headers requestHeaders) { trackRequest("PutMultipartObject"); } else if (Regex.simpleMatch("PUT /*/*", request)) { trackRequest("PutObject"); + } else if (Regex.simpleMatch("POST /*?delete*", request)) { + trackRequest("DeleteObjects"); } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index 9777bd974d56c..24aee99242957 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -199,7 +199,7 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener comp ? 
amazonS3Reference.get().priorityClient() : amazonS3Reference.get().client(); CompletableFuture completableFuture = blobStore.getAsyncTransferManager() - .uploadObject(s3AsyncClient, uploadRequest, streamContext); + .uploadObject(s3AsyncClient, uploadRequest, streamContext, blobStore.getStatsMetricPublisher()); completableFuture.whenComplete((response, throwable) -> { if (throwable == null) { completionListener.onResponse(response); @@ -384,7 +384,7 @@ private void doDeleteBlobs(List blobNames, boolean relative) throws IOEx assert outstanding.isEmpty(); } - private static DeleteObjectsRequest bulkDelete(String bucket, List blobs) { + private DeleteObjectsRequest bulkDelete(String bucket, List blobs) { return DeleteObjectsRequest.builder() .bucket(bucket) .delete( @@ -393,6 +393,7 @@ private static DeleteObjectsRequest bulkDelete(String bucket, List blobs .quiet(true) .build() ) + .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().deleteObjectsMetricPublisher)) .build(); } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java index 80005d92344a4..f568d871dd31a 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java @@ -47,6 +47,8 @@ import org.opensearch.repositories.s3.async.AsyncTransferManager; import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; import java.util.Locale; import java.util.Map; @@ -180,6 +182,16 @@ public Map stats() { return statsMetricPublisher.getStats().toMap(); } + @Override + public Map> extendedStats() { + if (statsMetricPublisher.getExtendedStats() == null || statsMetricPublisher.getExtendedStats().isEmpty()) { + return Collections.emptyMap(); + } + Map> extendedStats = new HashMap<>(); + statsMetricPublisher.getExtendedStats().forEach((k, v) -> extendedStats.put(k, v.toMap())); + return extendedStats; + } + public ObjectCannedACL getCannedACL() { return cannedACL; } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java index cad0037f99249..0c63bfdb1ff97 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java @@ -8,10 +8,13 @@ package org.opensearch.repositories.s3; -import software.amazon.awssdk.http.HttpMetric; import software.amazon.awssdk.metrics.MetricCollection; import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.metrics.MetricRecord; +import org.opensearch.common.blobstore.BlobStore; + +import java.time.Duration; import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; @@ -20,18 +23,67 @@ public class StatsMetricPublisher { private final Stats stats = new Stats(); + private final Map extendedStats = new HashMap<>() { + { + put(BlobStore.Metric.REQUEST_LATENCY, new Stats()); + put(BlobStore.Metric.REQUEST_SUCCESS, new Stats()); + put(BlobStore.Metric.REQUEST_FAILURE, new Stats()); + put(BlobStore.Metric.RETRY_COUNT, new Stats()); + } + }; + public MetricPublisher listObjectsMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) 
{ - stats.listCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + for (MetricRecord metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).listMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).listMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).listMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).listMetrics.addAndGet(1); + } + stats.listMetrics.addAndGet(1); + break; + } + } + } + + @Override + public void close() {} + }; + + public MetricPublisher deleteObjectsMetricPublisher = new MetricPublisher() { + @Override + public void publish(MetricCollection metricCollection) { + for (MetricRecord metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).deleteMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).deleteMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).deleteMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).deleteMetrics.addAndGet(1); + } + stats.deleteMetrics.addAndGet(1); + break; + } + } } @Override @@ -41,15 +93,26 @@ public void close() {} public MetricPublisher getObjectMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { - stats.getCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + for (MetricRecord metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).getMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).getMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).getMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).getMetrics.addAndGet(1); + } + stats.getMetrics.addAndGet(1); + break; + } + } } @Override @@ -59,15 +122,26 @@ public void close() {} public MetricPublisher putObjectMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { - stats.putCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + for (MetricRecord metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + 
extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).putMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).putMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).putMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).putMetrics.addAndGet(1); + } + stats.putMetrics.addAndGet(1); + break; + } + } } @Override @@ -77,15 +151,26 @@ public void close() {} public MetricPublisher multipartUploadMetricCollector = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { - stats.postCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + for (MetricRecord metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).multiPartPutMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).multiPartPutMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).multiPartPutMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).multiPartPutMetrics.addAndGet(1); + } + stats.multiPartPutMetrics.addAndGet(1); + break; + } + } } @Override @@ -96,22 +181,29 @@ public Stats getStats() { return stats; } + public Map getExtendedStats() { + return extendedStats; + } + static class Stats { - final AtomicLong listCount = new AtomicLong(); + final AtomicLong listMetrics = new AtomicLong(); + + final AtomicLong getMetrics = new AtomicLong(); - final AtomicLong getCount = new AtomicLong(); + final AtomicLong putMetrics = new AtomicLong(); - final AtomicLong putCount = new AtomicLong(); + final AtomicLong deleteMetrics = new AtomicLong(); - final AtomicLong postCount = new AtomicLong(); + final AtomicLong multiPartPutMetrics = new AtomicLong(); Map toMap() { final Map results = new HashMap<>(); - results.put("GetObject", getCount.get()); - results.put("ListObjects", listCount.get()); - results.put("PutObject", putCount.get()); - results.put("PutMultipartObject", postCount.get()); + results.put("GetObject", getMetrics.get()); + results.put("ListObjects", listMetrics.get()); + results.put("PutObject", putMetrics.get()); + results.put("DeleteObjects", deleteMetrics.get()); + results.put("PutMultipartObject", multiPartPutMetrics.get()); return results; } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java index fb1e409388a69..db04636b89d50 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java @@ -35,6 +35,7 @@ import org.opensearch.common.util.ByteUtils; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.s3.SocketAccess; +import 
org.opensearch.repositories.s3.StatsMetricPublisher; import org.opensearch.repositories.s3.io.CheckedContainer; import java.io.BufferedInputStream; @@ -87,16 +88,21 @@ public AsyncTransferManager(long minimumPartSize, ExecutorService executorServic * @param streamContext The {@link StreamContext} to supply streams during upload * @return A {@link CompletableFuture} to listen for upload completion */ - public CompletableFuture uploadObject(S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, StreamContext streamContext) { + public CompletableFuture uploadObject( + S3AsyncClient s3AsyncClient, + UploadRequest uploadRequest, + StreamContext streamContext, + StatsMetricPublisher statsMetricPublisher + ) { CompletableFuture returnFuture = new CompletableFuture<>(); try { if (streamContext.getNumberOfParts() == 1) { log.debug(() -> "Starting the upload as a single upload part request"); - uploadInOneChunk(s3AsyncClient, uploadRequest, streamContext.provideStream(0), returnFuture); + uploadInOneChunk(s3AsyncClient, uploadRequest, streamContext.provideStream(0), returnFuture, statsMetricPublisher); } else { log.debug(() -> "Starting the upload as multipart upload request"); - uploadInParts(s3AsyncClient, uploadRequest, streamContext, returnFuture); + uploadInParts(s3AsyncClient, uploadRequest, streamContext, returnFuture, statsMetricPublisher); } } catch (Throwable throwable) { returnFuture.completeExceptionally(throwable); @@ -109,12 +115,14 @@ private void uploadInParts( S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, StreamContext streamContext, - CompletableFuture returnFuture + CompletableFuture returnFuture, + StatsMetricPublisher statsMetricPublisher ) { CreateMultipartUploadRequest.Builder createMultipartUploadRequestBuilder = CreateMultipartUploadRequest.builder() .bucket(uploadRequest.getBucket()) - .key(uploadRequest.getKey()); + .key(uploadRequest.getKey()) + .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.multipartUploadMetricCollector)); if (uploadRequest.doRemoteDataIntegrityCheck()) { createMultipartUploadRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32); } @@ -287,12 +295,14 @@ private void uploadInOneChunk( S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, InputStreamContainer inputStreamContainer, - CompletableFuture returnFuture + CompletableFuture returnFuture, + StatsMetricPublisher statsMetricPublisher ) { PutObjectRequest.Builder putObjectRequestBuilder = PutObjectRequest.builder() .bucket(uploadRequest.getBucket()) .key(uploadRequest.getKey()) - .contentLength(uploadRequest.getContentLength()); + .contentLength(uploadRequest.getContentLength()) + .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.putObjectMetricPublisher)); if (uploadRequest.doRemoteDataIntegrityCheck()) { putObjectRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32); putObjectRequestBuilder.checksumCRC32(base64StringFromLong(uploadRequest.getExpectedChecksum())); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java index 9c07b929052bc..607453986ab16 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java @@ -33,6 +33,7 @@ import org.opensearch.common.io.InputStreamContainer; import 
org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.blobstore.ZeroInputStream; +import org.opensearch.repositories.s3.StatsMetricPublisher; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -80,7 +81,8 @@ public void testOneChunkUpload() { ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 1 - ) + ), + new StatsMetricPublisher() ); try { @@ -118,7 +120,8 @@ public void testOneChunkUploadCorruption() { ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 1 - ) + ), + new StatsMetricPublisher() ); try { @@ -169,7 +172,8 @@ public void testMultipartUpload() { ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 5 - ) + ), + new StatsMetricPublisher() ); try { @@ -219,7 +223,8 @@ public void testMultipartUploadCorruption() { ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 5 - ) + ), + new StatsMetricPublisher() ); try { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index 69efea186d927..6ce6ca40cbce4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -57,6 +57,7 @@ import org.opensearch.monitor.process.ProcessStats; import org.opensearch.node.AdaptiveSelectionStats; import org.opensearch.node.NodesResourceUsageStats; +import org.opensearch.repositories.RepositoriesStats; import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; import org.opensearch.search.backpressure.stats.SearchBackpressureStats; @@ -146,6 +147,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private NodesResourceUsageStats resourceUsageStats; + @Nullable + private RepositoriesStats repositoriesStats; + public NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -207,6 +211,11 @@ public NodeStats(StreamInput in) throws IOException { } else { resourceUsageStats = null; } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + repositoriesStats = in.readOptionalWriteable(RepositoriesStats::new); + } else { + repositoriesStats = null; + } } public NodeStats( @@ -234,7 +243,8 @@ public NodeStats( @Nullable WeightedRoutingStats weightedRoutingStats, @Nullable FileCacheStats fileCacheStats, @Nullable TaskCancellationStats taskCancellationStats, - @Nullable SearchPipelineStats searchPipelineStats + @Nullable SearchPipelineStats searchPipelineStats, + @Nullable RepositoriesStats repositoriesStats ) { super(node); this.timestamp = timestamp; @@ -261,6 +271,7 @@ public NodeStats( this.fileCacheStats = fileCacheStats; this.taskCancellationStats = taskCancellationStats; this.searchPipelineStats = searchPipelineStats; + this.repositoriesStats = repositoriesStats; } public long getTimestamp() { @@ -403,6 +414,11 @@ public SearchPipelineStats getSearchPipelineStats() { return searchPipelineStats; } + @Nullable + public RepositoriesStats getRepositoriesStats() { + return repositoriesStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -449,6 +465,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_3_0_0)) { // make it 2.12 when we backport out.writeOptionalWriteable(resourceUsageStats); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalWriteable(repositoriesStats); + 
} } @Override @@ -542,6 +561,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (getResourceUsageStats() != null) { getResourceUsageStats().toXContent(builder, params); } + if (getRepositoriesStats() != null) { + getRepositoriesStats().toXContent(builder, params); + } return builder; } }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 99c9fb2d1e26a..88dff20354aa2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -214,7 +214,8 @@ public enum Metric { FILE_CACHE_STATS("file_cache"), TASK_CANCELLATION("task_cancellation"), SEARCH_PIPELINE("search_pipeline"), - RESOURCE_USAGE_STATS("resource_usage_stats"); + RESOURCE_USAGE_STATS("resource_usage_stats"), + REPOSITORIES("repositories"); private String metricName;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 204157236a282..aa02f8e580f4a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -125,7 +125,8 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { NodesStatsRequest.Metric.FILE_CACHE_STATS.containedIn(metrics), NodesStatsRequest.Metric.TASK_CANCELLATION.containedIn(metrics), NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics), - NodesStatsRequest.Metric.RESOURCE_USAGE_STATS.containedIn(metrics) + NodesStatsRequest.Metric.RESOURCE_USAGE_STATS.containedIn(metrics), + NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics) ); }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index d8323e209be23..f51fabbfb2388 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -169,6 +169,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq false, false, false, + false, false ); List<ShardStats> shardsStats = new ArrayList<>();
diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java index 2ee3e9557b354..0f6646d37f950 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java @@ -56,8 +59,36 @@ default Map<String, Long> stats() { return Collections.emptyMap(); } + /** + * Returns detailed statistics of operations that have been performed on this blob store + */ + default Map<Metric, Map<String, Long>> extendedStats() { + return Collections.emptyMap(); + } + /** * Reload the blob store in place */ default void reload(RepositoryMetadata repositoryMetadata) {} + + /** + * Metrics for BlobStore interactions + */ + enum Metric { + REQUEST_SUCCESS("request_success_total"), + REQUEST_FAILURE("request_failures_total"), + REQUEST_LATENCY("request_time_in_millis"), + RETRY_COUNT("request_retry_count_total"); + + private String metricName; + + Metric(String name) { + this.metricName = name; + } + + public String metricName() { + return this.metricName; + } + } + }
diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java index 4d2d69e473438..a18ca8b9d5c39 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java +++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java @@ -75,6 +75,16 @@ public Map<String, Long> stats() { return blobStore.stats(); } + /** + * Retrieves extended statistics about the BlobStore. Delegates the call to the underlying BlobStore's extendedStats() method. + * + * @return A map containing extended statistics about the BlobStore. + */ + @Override + public Map<Metric, Map<String, Long>> extendedStats() { + return blobStore.extendedStats(); + } + /** * Closes the EncryptedBlobStore by decrementing the reference count of the CryptoManager and closing the * underlying BlobStore. This ensures proper cleanup of resources.
diff --git a/server/src/main/java/org/opensearch/node/Node.java index 69b80462bbf0b..c9148f382a028 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -1115,7 +1115,8 @@ protected Node( searchPipelineService, fileCache, taskCancellationMonitoringService, - resourceUsageCollectorService + resourceUsageCollectorService, + repositoryService ); final SearchService searchService = newSearchService(
diff --git a/server/src/main/java/org/opensearch/node/NodeService.java index 9bb07080fa717..e2d7bc2c86ba3 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -53,6 +53,7 @@ import org.opensearch.ingest.IngestService; import org.opensearch.monitor.MonitorService; import org.opensearch.plugins.PluginsService; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.AggregationUsageService; import org.opensearch.search.backpressure.SearchBackpressureService; @@ -93,6 +94,7 @@ public class NodeService implements Closeable { private final Discovery discovery; private final FileCache fileCache; private final TaskCancellationMonitoringService taskCancellationMonitoringService; + private final RepositoriesService repositoriesService; NodeService( Settings settings, @@ -116,7 +118,8 @@ public class NodeService implements Closeable { SearchPipelineService searchPipelineService, FileCache fileCache, TaskCancellationMonitoringService taskCancellationMonitoringService, - ResourceUsageCollectorService resourceUsageCollectorService + ResourceUsageCollectorService resourceUsageCollectorService, + RepositoriesService repositoriesService ) { this.settings = settings; this.threadPool = threadPool; @@ -140,6 +143,7 @@ public class NodeService implements Closeable { this.fileCache = fileCache; this.taskCancellationMonitoringService = taskCancellationMonitoringService;
this.resourceUsageCollectorService = resourceUsageCollectorService; + this.repositoriesService = repositoriesService; clusterService.addStateApplier(ingestService); clusterService.addStateApplier(searchPipelineService); } @@ -221,7 +225,8 @@ public NodeStats stats( boolean fileCacheStats, boolean taskCancellation, boolean searchPipelineStats, - boolean resourceUsageStats + boolean resourceUsageStats, + boolean repositoriesStats ) { // for indices stats we want to include previous allocated shards stats as well (it will // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) @@ -250,7 +255,8 @@ public NodeStats stats( weightedRoutingStats ? WeightedRoutingStats.getInstance() : null, fileCacheStats && fileCache != null ? fileCache.fileCacheStats() : null, taskCancellation ? this.taskCancellationMonitoringService.stats() : null, - searchPipelineStats ? this.searchPipelineService.stats() : null + searchPipelineStats ? this.searchPipelineService.stats() : null, + repositoriesStats ? this.repositoriesService.getRepositoriesStats() : null ); } diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java index 72266c053a1ae..68669feb16abc 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java @@ -457,7 +457,6 @@ public void applyClusterState(ClusterChangedEvent event) { logger.debug("unregistering repository [{}]", entry.getKey()); Repository repository = entry.getValue(); closeRepository(repository); - archiveRepositoryStats(repository, state.version()); } else { survivors.put(entry.getKey(), entry.getValue()); } @@ -485,7 +484,6 @@ public void applyClusterState(ClusterChangedEvent event) { } else { logger.debug("updating repository [{}]", repositoryMetadata.name()); closeRepository(repository); - archiveRepositoryStats(repository, state.version()); repository = null; try { repository = createRepository(repositoryMetadata, typesRegistry); @@ -575,12 +573,12 @@ public Repository repository(String repositoryName) { } public List repositoriesStats() { - List archivedRepoStats = repositoriesStatsArchive.getArchivedStats(); List activeRepoStats = getRepositoryStatsForActiveRepositories(); + return activeRepoStats; + } - List repositoriesStats = new ArrayList<>(archivedRepoStats); - repositoriesStats.addAll(activeRepoStats); - return repositoriesStats; + public RepositoriesStats getRepositoriesStats() { + return new RepositoriesStats(repositoriesStats()); } private List getRepositoryStatsForActiveRepositories() { @@ -640,15 +638,6 @@ public void closeRepository(Repository repository) { repository.close(); } - private void archiveRepositoryStats(Repository repository, long clusterStateVersion) { - if (repository instanceof MeteredBlobStoreRepository) { - RepositoryStatsSnapshot stats = ((MeteredBlobStoreRepository) repository).statsSnapshotForArchival(clusterStateVersion); - if (repositoriesStatsArchive.archive(stats) == false) { - logger.warn("Unable to archive the repository stats [{}] as the archive is full.", stats); - } - } - } - /** * Creates repository holder. 
This method starts the non-internal repository */
diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesStats.java new file mode 100644 index 0000000000000..b24e0dddd852a --- /dev/null +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesStats.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; + +/** + * Encapsulates stats for multiple repositories + */ +public class RepositoriesStats implements Writeable, ToXContentObject { + + List<RepositoryStatsSnapshot> repositoryStatsSnapshots; + + public RepositoriesStats(List<RepositoryStatsSnapshot> repositoryStatsSnapshots) { + this.repositoryStatsSnapshots = repositoryStatsSnapshots; + } + + public RepositoriesStats(StreamInput in) throws IOException { + this.repositoryStatsSnapshots = in.readList(RepositoryStatsSnapshot::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(repositoryStatsSnapshots); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray("repositories"); + if (CollectionUtils.isEmpty(repositoryStatsSnapshots) == false) { + for (RepositoryStatsSnapshot repositoryStatsSnapshot : repositoryStatsSnapshots) { + repositoryStatsSnapshot.toXContent(builder, params); + } + } + builder.endArray(); + return builder; + } +}
diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesStatsArchive.java index b8f100706f81e..3d35f75176eaf 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesStatsArchive.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesStatsArchive.java @@ -70,11 +70,6 @@ public RepositoriesStatsArchive(TimeValue retentionPeriod, int maxCapacity, Long * @return {@code true} if the repository stats were archived, {@code false} otherwise.
*/ synchronized boolean archive(final RepositoryStatsSnapshot repositoryStats) { - assert containsRepositoryStats(repositoryStats) == false : "A repository with ephemeral id " - + repositoryStats.getRepositoryInfo().ephemeralId - + " is already archived"; - assert repositoryStats.isArchived(); - evict(); if (archive.size() >= maxCapacity) { @@ -116,15 +111,6 @@ private void evict() { } } - private boolean containsRepositoryStats(RepositoryStatsSnapshot repositoryStats) { - return archive.stream() - .anyMatch( - entry -> entry.repositoryStatsSnapshot.getRepositoryInfo().ephemeralId.equals( - repositoryStats.getRepositoryInfo().ephemeralId - ) - ); - } - private static class ArchiveEntry { private final RepositoryStatsSnapshot repositoryStatsSnapshot; private final long createdAtMillis; diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java b/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java index 8aa86fc46d591..387a685bd6526 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java @@ -32,7 +32,6 @@ package org.opensearch.repositories; -import org.opensearch.common.Nullable; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -51,64 +50,27 @@ * @opensearch.internal */ public final class RepositoryInfo implements Writeable, ToXContentFragment { - public final String ephemeralId; public final String name; public final String type; public final Map location; - public final long startedAt; - @Nullable - public final Long stoppedAt; - public RepositoryInfo(String ephemeralId, String name, String type, Map location, long startedAt) { - this(ephemeralId, name, type, location, startedAt, null); - } - - public RepositoryInfo( - String ephemeralId, - String name, - String type, - Map location, - long startedAt, - @Nullable Long stoppedAt - ) { - this.ephemeralId = ephemeralId; + public RepositoryInfo(String name, String type, Map location) { this.name = name; this.type = type; this.location = location; - this.startedAt = startedAt; - if (stoppedAt != null && startedAt > stoppedAt) { - throw new IllegalArgumentException("createdAt must be before or equal to stoppedAt"); - } - this.stoppedAt = stoppedAt; } public RepositoryInfo(StreamInput in) throws IOException { - this.ephemeralId = in.readString(); this.name = in.readString(); this.type = in.readString(); this.location = in.readMap(StreamInput::readString, StreamInput::readString); - this.startedAt = in.readLong(); - this.stoppedAt = in.readOptionalLong(); - } - - public RepositoryInfo stopped(long stoppedAt) { - assert isStopped() == false : "The repository is already stopped"; - - return new RepositoryInfo(ephemeralId, name, type, location, startedAt, stoppedAt); - } - - public boolean isStopped() { - return stoppedAt != null; } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(ephemeralId); out.writeString(name); out.writeString(type); out.writeMap(location, StreamOutput::writeString, StreamOutput::writeString); - out.writeLong(startedAt); - out.writeOptionalLong(stoppedAt); } @Override @@ -116,11 +78,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("repository_name", name); builder.field("repository_type", type); builder.field("repository_location", location); - builder.field("repository_ephemeral_id", 
ephemeralId); - builder.field("repository_started_at", startedAt); - if (stoppedAt != null) { - builder.field("repository_stopped_at", stoppedAt); - } return builder; } @@ -129,17 +86,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; RepositoryInfo that = (RepositoryInfo) o; - return ephemeralId.equals(that.ephemeralId) - && name.equals(that.name) - && type.equals(that.type) - && location.equals(that.location) - && startedAt == that.startedAt - && Objects.equals(stoppedAt, that.stoppedAt); + return name.equals(that.name) && type.equals(that.type) && location.equals(that.location); } @Override public int hashCode() { - return Objects.hash(ephemeralId, name, type, location, startedAt, stoppedAt); + return Objects.hash(name, type, location); } @Override diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryStats.java b/server/src/main/java/org/opensearch/repositories/RepositoryStats.java index efd5d6f8560b6..ab97c5eaa1f7a 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryStats.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryStats.java @@ -32,9 +32,13 @@ package org.opensearch.repositories; +import org.opensearch.common.Nullable; +import org.opensearch.common.blobstore.BlobStore; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; import java.util.Collections; @@ -47,32 +51,63 @@ * * @opensearch.internal */ -public class RepositoryStats implements Writeable { +public class RepositoryStats implements Writeable, ToXContentFragment { public static final RepositoryStats EMPTY_STATS = new RepositoryStats(Collections.emptyMap()); + @Nullable public final Map requestCounts; + @Nullable + public final Map> extendedStats; + public final boolean detailed; public RepositoryStats(Map requestCounts) { this.requestCounts = Collections.unmodifiableMap(requestCounts); + this.extendedStats = Collections.emptyMap(); + this.detailed = false; + } + + public RepositoryStats(Map> extendedStats, boolean detailed) { + this.requestCounts = Collections.emptyMap(); + this.extendedStats = Collections.unmodifiableMap(extendedStats); + this.detailed = detailed; } public RepositoryStats(StreamInput in) throws IOException { this.requestCounts = in.readMap(StreamInput::readString, StreamInput::readLong); + this.extendedStats = in.readMap( + e -> e.readEnum(BlobStore.Metric.class), + i -> i.readMap(StreamInput::readString, StreamInput::readLong) + ); + this.detailed = in.readBoolean(); } public RepositoryStats merge(RepositoryStats otherStats) { - final Map result = new HashMap<>(); - result.putAll(requestCounts); - for (Map.Entry entry : otherStats.requestCounts.entrySet()) { - result.merge(entry.getKey(), entry.getValue(), Math::addExact); + assert this.detailed == otherStats.detailed; + if (detailed) { + final Map> result = new HashMap<>(); + result.putAll(extendedStats); + for (Map.Entry> entry : otherStats.extendedStats.entrySet()) { + for (Map.Entry nested : entry.getValue().entrySet()) { + result.get(entry.getKey()).merge(nested.getKey(), nested.getValue(), Math::addExact); + } + } + return new RepositoryStats(result, true); + } else { + final Map result = new HashMap<>(); + result.putAll(requestCounts); + for (Map.Entry entry : 
otherStats.requestCounts.entrySet()) { + result.merge(entry.getKey(), entry.getValue(), Math::addExact); + } + return new RepositoryStats(result); } - return new RepositoryStats(result); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(requestCounts, StreamOutput::writeString, StreamOutput::writeLong); + out.writeMap(extendedStats, StreamOutput::writeEnum, (o, v) -> o.writeMap(v, StreamOutput::writeString, StreamOutput::writeLong)); + out.writeBoolean(detailed); } @Override @@ -80,16 +115,32 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; RepositoryStats that = (RepositoryStats) o; - return requestCounts.equals(that.requestCounts); + return requestCounts.equals(that.requestCounts) && extendedStats.equals(that.extendedStats) && detailed == that.detailed; } @Override public int hashCode() { - return Objects.hash(requestCounts); + return Objects.hash(requestCounts, detailed, extendedStats); } @Override public String toString() { - return "RepositoryStats{" + "requestCounts=" + requestCounts + '}'; + return "RepositoryStats{" + "requestCounts=" + requestCounts + ", extendedStats=" + extendedStats + ", detailed=" + detailed + "}"; } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (detailed == false) { + builder.field("request_counts", requestCounts); + } else { + extendedStats.forEach((k, v) -> { + try { + builder.field(k.metricName(), v); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + return builder; + } }
diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryStatsSnapshot.java index 2b061cd2c2cc9..0a727980fad0d 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryStatsSnapshot.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryStatsSnapshot.java @@ -53,21 +53,17 @@ public final class RepositoryStatsSnapshot implements Writeable, ToXContentObjec private final RepositoryInfo repositoryInfo; private final RepositoryStats repositoryStats; private final long clusterVersion; - private final boolean archived; - public RepositoryStatsSnapshot(RepositoryInfo repositoryInfo, RepositoryStats repositoryStats, long clusterVersion, boolean archived) { - assert archived != (clusterVersion == UNKNOWN_CLUSTER_VERSION); + public RepositoryStatsSnapshot(RepositoryInfo repositoryInfo, RepositoryStats repositoryStats, long clusterVersion) { this.repositoryInfo = repositoryInfo; this.repositoryStats = repositoryStats; this.clusterVersion = clusterVersion; - this.archived = archived; } public RepositoryStatsSnapshot(StreamInput in) throws IOException { this.repositoryInfo = new RepositoryInfo(in); this.repositoryStats = new RepositoryStats(in); this.clusterVersion = in.readLong(); - this.archived = in.readBoolean(); } public RepositoryInfo getRepositoryInfo() { @@ -78,10 +74,6 @@ public RepositoryStats getRepositoryStats() { return repositoryStats; } - public boolean isArchived() { - return archived; - } - public long getClusterVersion() { return clusterVersion; } @@ -91,18 +83,13 @@ public void writeTo(StreamOutput out) throws IOException { repositoryInfo.writeTo(out); repositoryStats.writeTo(out); out.writeLong(clusterVersion); - out.writeBoolean(archived); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(); repositoryInfo.toXContent(builder, params); - builder.field("request_counts", repositoryStats.requestCounts); - builder.field("archived", archived); - if (archived) { - builder.field("cluster_version", clusterVersion); - } + repositoryStats.toXContent(builder, params); builder.endObject(); return builder; } @@ -114,13 +101,12 @@ public boolean equals(Object o) { RepositoryStatsSnapshot that = (RepositoryStatsSnapshot) o; return repositoryInfo.equals(that.repositoryInfo) && repositoryStats.equals(that.repositoryStats) - && clusterVersion == that.clusterVersion - && archived == that.archived; + && clusterVersion == that.clusterVersion; } @Override public int hashCode() { - return Objects.hash(repositoryInfo, repositoryStats, clusterVersion, archived); + return Objects.hash(repositoryInfo, repositoryStats, clusterVersion); } @Override diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index c48efa9d3d2ee..8a2260e1f6d90 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -851,6 +851,8 @@ public RepositoryStats stats() { final BlobStore store = blobStore.get(); if (store == null) { return RepositoryStats.EMPTY_STATS; + } else if (store.extendedStats() != null && store.extendedStats().isEmpty() == false) { + return new RepositoryStats(store.extendedStats(), true); } return new RepositoryStats(store.stats()); } diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java index d4921f4e6d2e7..0651ff586d412 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java @@ -34,12 +34,10 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.UUIDs; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.repositories.RepositoryInfo; import org.opensearch.repositories.RepositoryStatsSnapshot; -import org.opensearch.threadpool.ThreadPool; import java.util.Map; @@ -59,14 +57,7 @@ public MeteredBlobStoreRepository( Map location ) { super(metadata, namedXContentRegistry, clusterService, recoverySettings); - ThreadPool threadPool = clusterService.getClusterApplierService().threadPool(); - this.repositoryInfo = new RepositoryInfo( - UUIDs.randomBase64UUID(), - metadata.name(), - metadata.type(), - location, - threadPool.absoluteTimeInMillis() - ); + this.repositoryInfo = new RepositoryInfo(metadata.name(), metadata.type(), location); } @Override @@ -78,11 +69,6 @@ public void reload(RepositoryMetadata repositoryMetadata) { } public RepositoryStatsSnapshot statsSnapshot() { - return new RepositoryStatsSnapshot(repositoryInfo, stats(), RepositoryStatsSnapshot.UNKNOWN_CLUSTER_VERSION, false); - } - - public RepositoryStatsSnapshot statsSnapshotForArchival(long clusterVersion) { - RepositoryInfo stoppedRepoInfo = repositoryInfo.stopped(threadPool.absoluteTimeInMillis()); - return new RepositoryStatsSnapshot(stoppedRepoInfo, stats(), clusterVersion, true); + return new 
RepositoryStatsSnapshot(repositoryInfo, stats(), RepositoryStatsSnapshot.UNKNOWN_CLUSTER_VERSION); } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index 4b865383ee007..3491f18da9550 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -852,6 +852,7 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { weightedRoutingStats, null, null, + null, null ); } diff --git a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java index 64949cf861f70..6f03e87bf5824 100644 --- a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java @@ -191,6 +191,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -218,6 +219,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -245,6 +247,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ) ); @@ -303,6 +306,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -330,6 +334,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -357,6 +362,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ) ); diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index c4599a6e7a00e..43ebb86fd5342 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -219,20 +219,17 @@ public void testRepositoriesStatsCanHaveTheSameNameAndDifferentTypeOverTime() { assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", emptyState(), clusterStateWithRepoTypeA)); - assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(0)); ClusterState clusterStateWithRepoTypeB = createClusterStateWithRepo(repoName, MeteredRepositoryTypeB.TYPE); repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeB, emptyState())); List repositoriesStats = repositoriesService.repositoriesStats(); - assertThat(repositoriesStats.size(), equalTo(2)); + assertThat(repositoriesStats.size(), equalTo(1)); RepositoryStatsSnapshot repositoryStatsTypeA = repositoriesStats.get(0); - assertThat(repositoryStatsTypeA.getRepositoryInfo().type, equalTo(MeteredRepositoryTypeA.TYPE)); - assertThat(repositoryStatsTypeA.getRepositoryStats(), equalTo(MeteredRepositoryTypeA.STATS)); + assertThat(repositoryStatsTypeA.getRepositoryInfo().type, equalTo(MeteredRepositoryTypeB.TYPE)); + assertThat(repositoryStatsTypeA.getRepositoryStats(), equalTo(MeteredRepositoryTypeB.STATS)); - RepositoryStatsSnapshot repositoryStatsTypeB = repositoriesStats.get(1); - assertThat(repositoryStatsTypeB.getRepositoryInfo().type, equalTo(MeteredRepositoryTypeB.TYPE)); - assertThat(repositoryStatsTypeB.getRepositoryStats(), 
equalTo(MeteredRepositoryTypeB.STATS)); } public void testWithSameKeyProviderNames() { @@ -258,7 +255,7 @@ public void testWithSameKeyProviderNames() { kpTypeA ); repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeB, emptyState())); - assertThat(repositoriesService.repositoriesStats().size(), equalTo(2)); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); MeteredRepositoryTypeB repositoryB = (MeteredRepositoryTypeB) repositoriesService.repository("repoName"); assertNotNull(repositoryB); assertNotNull(repository.cryptoHandler); diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java index cf0b06a3f7d16..da0cbcb1d4b17 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java @@ -32,7 +32,6 @@ package org.opensearch.repositories; -import org.opensearch.common.UUIDs; import org.opensearch.common.unit.TimeValue; import org.opensearch.test.OpenSearchTestCase; @@ -122,14 +121,11 @@ private RepositoryStatsSnapshot createRepositoryStats(RepositoryStats repository private RepositoryStatsSnapshot createRepositoryStats(RepositoryStats repositoryStats, long clusterVersion) { RepositoryInfo repositoryInfo = new RepositoryInfo( - UUIDs.randomBase64UUID(), randomAlphaOfLength(10), randomAlphaOfLength(10), - Map.of("bucket", randomAlphaOfLength(10)), - System.currentTimeMillis(), - null + Map.of("bucket", randomAlphaOfLength(10)) ); - return new RepositoryStatsSnapshot(repositoryInfo, repositoryStats, clusterVersion, true); + return new RepositoryStatsSnapshot(repositoryInfo, repositoryStats, clusterVersion); } } diff --git a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java index a520b6278ea47..60a54110fd0b4 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java @@ -121,7 +121,8 @@ List adjustNodesStats(List nodesStats) { nodeStats.getWeightedRoutingStats(), nodeStats.getFileCacheStats(), nodeStats.getTaskCancellationStats(), - nodeStats.getSearchPipelineStats() + nodeStats.getSearchPipelineStats(), + nodeStats.getRepositoriesStats() ); }).collect(Collectors.toList()); } diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java index dff9b997d87db..faa9d52b105b2 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java @@ -238,7 +238,7 @@ public void testRequestStats() throws Exception { assertEquals(assertionErrorMsg, mockCalls, sdkRequestCounts); } - private Map getMockRequestCounts() { + protected Map getMockRequestCounts() { for (HttpHandler h : handlers.values()) { while (h instanceof DelegatingHttpHandler) { if (h instanceof HttpStatsCollectorHandler) { diff --git 
a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index a290f3a1751a0..898e125b94954 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -2721,6 +2721,7 @@ public void ensureEstimatedStats() { false, false, false, + false, false ); assertThat( From d2c2e20f50c2020fc7906ee5db466b1158ace832 Mon Sep 17 00:00:00 2001 From: Stephen Crawford <65832608+scrawfor99@users.noreply.github.com> Date: Wed, 18 Oct 2023 09:39:08 -0400 Subject: [PATCH 034/178] OnBehalfOf claims take second duration (#10664) OnBehalfOf claims take second duration Signed-off-by: Stephen Crawford --- CHANGELOG.md | 2 +- .../opensearch/identity/tokens/AuthToken.java | 1 + .../identity/tokens/OnBehalfOfClaims.java | 55 +++---------------- 3 files changed, 9 insertions(+), 49 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0742215a2dfb9..f9724033f540a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) - Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) - Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) -- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) +- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) - [Admission control] Add enhancements to FS stats to include read/write time, queue size and IO time ([#10541](https://github.com/opensearch-project/OpenSearch/pull/10541)) - [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) diff --git a/server/src/main/java/org/opensearch/identity/tokens/AuthToken.java b/server/src/main/java/org/opensearch/identity/tokens/AuthToken.java index c929e7421b3d8..88bb855a6e70d 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/AuthToken.java +++ b/server/src/main/java/org/opensearch/identity/tokens/AuthToken.java @@ -16,4 +16,5 @@ public interface AuthToken { String asAuthHeaderValue(); + } diff --git a/server/src/main/java/org/opensearch/identity/tokens/OnBehalfOfClaims.java b/server/src/main/java/org/opensearch/identity/tokens/OnBehalfOfClaims.java index 3fef248ee6d3a..00e50a59e9486 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/OnBehalfOfClaims.java +++ b/server/src/main/java/org/opensearch/identity/tokens/OnBehalfOfClaims.java @@ -14,46 +14,17 @@ public class OnBehalfOfClaims { private final String audience; - private final String subject; - private final Long expiration; - private final Long not_before; - private final Long issued_at; + private final Long expiration_seconds; /** * Constructor for OnBehalfOfClaims * @param aud the Audience for the token - * @param subject the subject of the token - * @param 
expiration the expiration time in seconds for the token - * @param not_before the not_before time in seconds for the token - * @param issued_at the issued_at time in seconds for the token - */ - public OnBehalfOfClaims(String aud, String subject, Long expiration, Long not_before, Long issued_at) { - this.audience = aud; - this.subject = subject; - this.expiration = expiration; - this.not_before = not_before; - this.issued_at = issued_at; - } - - /** - * A constructor that sets a default issued at time of the current time - * @param aud the Audience for the token - * @param subject the subject of the token - * @param expiration the expiration time in seconds for the token - * @param not_before the not_before time in seconds for the token - */ - public OnBehalfOfClaims(String aud, String subject, Long expiration, Long not_before) { - this(aud, subject, expiration, not_before, System.currentTimeMillis() / 1000); - } + * @param expiration_seconds the length of time in seconds the token is valid - /** - * A constructor which sets a default not before time of the current time - * @param aud the Audience for the token - * @param subject the subject of the token - * @param expiration the expiration time in seconds for the token */ - public OnBehalfOfClaims(String aud, String subject, Long expiration) { - this(aud, subject, expiration, System.currentTimeMillis() / 1000); + public OnBehalfOfClaims(String aud, Long expiration_seconds) { + this.audience = aud; + this.expiration_seconds = expiration_seconds; } /** @@ -62,26 +33,14 @@ public OnBehalfOfClaims(String aud, String subject, Long expiration) { * @param subject the subject of the token */ public OnBehalfOfClaims(String aud, String subject) { - this(aud, subject, System.currentTimeMillis() / 1000 + 300); + this(aud, 300L); } public String getAudience() { return audience; } - public String getSubject() { - return subject; - } - public Long getExpiration() { - return expiration; - } - - public Long getNot_before() { - return not_before; - } - - public Long getIssued_at() { - return issued_at; + return expiration_seconds; } } From c00141770f1cde3a26ab3e514866ad136379c7f0 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Wed, 18 Oct 2023 08:12:53 -0700 Subject: [PATCH 035/178] Fix download reporting for segment replication (#10644) Signed-off-by: Kunal Kotwani --- .../PrimaryShardReplicationSource.java | 6 ++ .../RemoteStoreReplicationSource.java | 9 ++- .../replication/SegmentReplicationSource.java | 73 +++++++++++++++++++ .../replication/SegmentReplicationTarget.java | 23 +++++- .../index/shard/RemoteIndexShardTests.java | 4 +- .../SegmentReplicationIndexShardTests.java | 2 + ...licationWithNodeToNodeIndexShardTests.java | 6 ++ .../PrimaryShardReplicationSourceTests.java | 3 + .../RemoteStoreReplicationSourceTests.java | 20 ++++- .../SegmentReplicationTargetServiceTests.java | 4 + .../SegmentReplicationTargetTests.java | 50 +++++++++++++ .../replication/TestReplicationSource.java | 2 + .../index/shard/IndexShardTestCase.java | 2 + 13 files changed, 198 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java index 9dcd16c53e6f3..02fc8feefd698 100644 --- a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java @@ -22,6 +22,7 @@ import 
org.opensearch.transport.TransportService; import java.util.List; +import java.util.function.BiConsumer; import static org.opensearch.indices.replication.SegmentReplicationSourceService.Actions.GET_CHECKPOINT_INFO; import static org.opensearch.indices.replication.SegmentReplicationSourceService.Actions.GET_SEGMENT_FILES; @@ -80,8 +81,13 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { + // fileProgressTracker is a no-op for node to node recovery + // MultiFileWriter takes care of progress tracking for downloads in this scenario + // TODO: Move state management and tracking into replication methods and use chunking and data + // copy mechanisms only from MultiFileWriter final Writeable.Reader reader = GetSegmentFilesResponse::new; final ActionListener responseListener = ActionListener.map(listener, r -> r); final GetSegmentFilesRequest request = new GetSegmentFilesRequest( diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java index d2000a56401f5..12eabf1e6554f 100644 --- a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java @@ -29,6 +29,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.function.BiConsumer; import java.util.stream.Collectors; /** @@ -95,6 +96,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { try { @@ -117,7 +119,12 @@ public void getSegmentFiles( assert directoryFiles.contains(file) == false : "Local store already contains the file " + file; toDownloadSegmentNames.add(file); } - indexShard.getFileDownloader().download(remoteDirectory, storeDirectory, toDownloadSegmentNames); + indexShard.getFileDownloader() + .download( + remoteDirectory, + new ReplicationStatsDirectoryWrapper(storeDirectory, fileProgressTracker), + toDownloadSegmentNames + ); logger.debug("Downloaded segment files from remote store {}", filesToFetch); } finally { indexShard.store().decRef(); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java index 6676b5b667e42..24f0cb15ddb25 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java @@ -8,13 +8,19 @@ package org.opensearch.indices.replication; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; import org.opensearch.common.util.CancellableThreads.ExecutionCancelledException; import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import java.io.IOException; import java.util.List; +import java.util.function.BiConsumer; /** * Represents the source of a replication event. 
@@ -39,6 +45,7 @@ public interface SegmentReplicationSource { * @param checkpoint {@link ReplicationCheckpoint} Checkpoint to fetch metadata for. * @param filesToFetch {@link List} List of files to fetch. * @param indexShard {@link IndexShard} Reference to the IndexShard. + * @param fileProgressTracker {@link BiConsumer} A consumer that updates the replication progress for shard files. * @param listener {@link ActionListener} Listener that completes with the list of files copied. */ void getSegmentFiles( @@ -46,6 +53,7 @@ void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ); @@ -58,4 +66,69 @@ void getSegmentFiles( * Cancel any ongoing requests, should resolve any ongoing listeners with onFailure with a {@link ExecutionCancelledException}. */ default void cancel() {} + + /** + * Directory wrapper that records copy process for replication statistics + * + * @opensearch.internal + */ + final class ReplicationStatsDirectoryWrapper extends FilterDirectory { + private final BiConsumer<String, Long> fileProgressTracker; + + ReplicationStatsDirectoryWrapper(Directory in, BiConsumer<String, Long> fileProgressTracker) { + super(in); + this.fileProgressTracker = fileProgressTracker; + } + + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { + // here we wrap the index input from the source directory to report progress of file copy for the recovery stats. + // we increment the num bytes recovered in the readBytes method below, if users pull statistics they can see immediately + // how much has been recovered. + in.copyFrom(new FilterDirectory(from) { + @Override + public IndexInput openInput(String name, IOContext context) throws IOException { + final IndexInput input = in.openInput(name, context); + return new IndexInput("StatsDirectoryWrapper(" + input.toString() + ")") { + @Override + public void close() throws IOException { + input.close(); + } + + @Override + public long getFilePointer() { + throw new UnsupportedOperationException("only straight copies are supported"); + } + + @Override + public void seek(long pos) throws IOException { + throw new UnsupportedOperationException("seeks are not supported"); + } + + @Override + public long length() { + return input.length(); + } + + @Override + public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { + throw new UnsupportedOperationException("slices are not supported"); + } + + @Override + public byte readByte() throws IOException { + throw new UnsupportedOperationException("use a buffer if you wanna perform well"); + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + // we rely on the fact that copyFrom uses a buffer + input.readBytes(b, offset, len); + fileProgressTracker.accept(dest, (long) len); + } + }; + } + }, src, dest, context); + } + } }
diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 0eb6ce36fa63d..cd6dbe8af90d9 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -170,7 +174,14 @@ public void startReplication(ActionListener<Void> listener) { final List<StoreFileMetadata> filesToFetch = getFiles(checkpointInfo);
state.setStage(SegmentReplicationState.Stage.GET_FILES); cancellableThreads.checkForCancel(); - source.getSegmentFiles(getId(), checkpointInfo.getCheckpoint(), filesToFetch, indexShard, getFilesListener); + source.getSegmentFiles( + getId(), + checkpointInfo.getCheckpoint(), + filesToFetch, + indexShard, + this::updateFileRecoveryBytes, + getFilesListener + ); }, listener::onFailure); getFilesListener.whenComplete(response -> { @@ -240,6 +247,20 @@ private boolean validateLocalChecksum(StoreFileMetadata file) { } } + /** + * Updates the state to reflect recovery progress for the given file and + * updates the last access time for the target. + * @param fileName Name of the file being downloaded + * @param bytesRecovered Number of bytes recovered + */ + private void updateFileRecoveryBytes(String fileName, long bytesRecovered) { + ReplicationLuceneIndex index = state.getIndex(); + if (index != null) { + index.addRecoveredBytesToFile(fileName, bytesRecovered); + } + setLastAccessTime(); + } + private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse) throws OpenSearchCorruptionException { cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java index 6a99063d11353..fe389e3b3fcb4 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -43,6 +43,7 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiConsumer; import java.util.stream.Collectors; import static org.opensearch.index.engine.EngineTestCase.assertAtMostOneLuceneDocumentPerSequenceNumber; @@ -388,9 +389,10 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { - super.getSegmentFiles(replicationId, checkpoint, filesToFetch, indexShard, listener); + super.getSegmentFiles(replicationId, checkpoint, filesToFetch, indexShard, (fileName, bytesRecovered) -> {}, listener); runAfterGetFiles[index.getAndIncrement()].run(); } diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 528402d48658a..52f28aead533d 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -91,6 +91,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; import java.util.function.Function; import static org.opensearch.index.engine.EngineTestCase.assertAtMostOneLuceneDocumentPerSequenceNumber; @@ -725,6 +726,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { // set the listener, we will only fail it once we cancel the source. 
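To make the byte accounting above concrete: the remote-store path reports progress from inside Directory.copyFrom by wrapping the source directory so the buffered readBytes calls become observable, and each observed chunk then flows into updateFileRecoveryBytes. A self-contained sketch of that wrapping technique against the stock Lucene store API (the class name, ByteBuffersDirectory setup, and file name are illustrative assumptions, not OpenSearch code):

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.lucene.store.ByteBuffersDirectory;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FilterDirectory;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexInput;
    import org.apache.lucene.store.IndexOutput;

    public class CopyProgressSketch {
        public static void main(String[] args) throws IOException {
            AtomicLong copiedBytes = new AtomicLong();
            try (Directory source = new ByteBuffersDirectory(); Directory destination = new ByteBuffersDirectory()) {
                try (IndexOutput out = source.createOutput("seg.bin", IOContext.DEFAULT)) {
                    out.writeBytes(new byte[4096], 4096); // a 4 KB stand-in for a segment file
                }
                // Wrap the source so the buffered reads issued by copyFrom are observable.
                Directory countingSource = new FilterDirectory(source) {
                    @Override
                    public IndexInput openInput(String name, IOContext context) throws IOException {
                        final IndexInput input = super.openInput(name, context);
                        return new IndexInput("counting(" + input + ")") {
                            @Override public void close() throws IOException { input.close(); }
                            @Override public long getFilePointer() { return input.getFilePointer(); }
                            @Override public void seek(long pos) throws IOException { input.seek(pos); }
                            @Override public long length() { return input.length(); }
                            @Override public IndexInput slice(String desc, long off, long len) throws IOException {
                                return input.slice(desc, off, len);
                            }
                            @Override public byte readByte() throws IOException { return input.readByte(); }
                            @Override public void readBytes(byte[] b, int offset, int len) throws IOException {
                                input.readBytes(b, offset, len);
                                copiedBytes.addAndGet(len); // a progress callback would be invoked here
                            }
                        };
                    }
                };
                destination.copyFrom(countingSource, "seg.bin", "seg.bin", IOContext.DEFAULT);
            }
            System.out.println("bytes copied: " + copiedBytes.get()); // prints 4096
        }
    }

The sketch delegates seek and slice to stay general; the patch's wrapper can throw UnsupportedOperationException for them because copyFrom only ever streams each file once, straight through.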
diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java index c394101697b47..f0950fe5392de 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java @@ -47,6 +47,7 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; @@ -87,6 +88,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { // randomly resolve the listener, indicating the source has resolved. @@ -131,6 +133,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { Assert.fail("Should not be reached"); @@ -176,6 +179,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { Assert.fail("Unreachable"); @@ -223,6 +227,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) {} }; @@ -269,6 +274,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); diff --git a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java index bcacef83d190a..e4dd32e5c6f70 100644 --- a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java @@ -125,6 +125,7 @@ public void testGetSegmentFiles() { checkpoint, Arrays.asList(testMetadata), mock(IndexShard.class), + (fileName, bytesRecovered) -> {}, mock(ActionListener.class) ); CapturingTransport.CapturedRequest[] requestList = transport.getCapturedRequestsAndClear(); @@ -153,6 +154,7 @@ public void testTransportTimeoutForGetSegmentFilesAction() { checkpoint, Arrays.asList(testMetadata), mock(IndexShard.class), + (fileName, bytesRecovered) -> {}, mock(ActionListener.class) ); CapturingTransport.CapturedRequest[] requestList = transport.getCapturedRequestsAndClear(); @@ -178,6 +180,7 @@ public void testGetSegmentFiles_CancelWhileRequestOpen() throws InterruptedExcep checkpoint, Arrays.asList(testMetadata), mock(IndexShard.class), + (fileName, bytesRecovered) -> {}, new ActionListener<>() { @Override public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { diff --git a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java index 9204f48ba5bdd..287962b158c79 100644 --- 
a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java @@ -90,7 +90,7 @@ public void testGetSegmentFiles() throws ExecutionException, InterruptedExceptio List filesToFetch = primaryShard.getSegmentMetadataMap().values().stream().collect(Collectors.toList()); final PlainActionFuture res = PlainActionFuture.newFuture(); replicationSource = new RemoteStoreReplicationSource(primaryShard); - replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, filesToFetch, replicaShard, res); + replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, filesToFetch, replicaShard, (fileName, bytesRecovered) -> {}, res); GetSegmentFilesResponse response = res.get(); assertEquals(response.files.size(), filesToFetch.size()); assertTrue(response.files.containsAll(filesToFetch)); @@ -104,7 +104,14 @@ public void testGetSegmentFilesAlreadyExists() throws IOException, InterruptedEx try { final PlainActionFuture res = PlainActionFuture.newFuture(); replicationSource = new RemoteStoreReplicationSource(primaryShard); - replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, filesToFetch, primaryShard, res); + replicationSource.getSegmentFiles( + REPLICATION_ID, + checkpoint, + filesToFetch, + primaryShard, + (fileName, bytesRecovered) -> {}, + res + ); res.get(); } catch (AssertionError | ExecutionException ex) { latch.countDown(); @@ -118,7 +125,14 @@ public void testGetSegmentFilesReturnEmptyResponse() throws ExecutionException, final ReplicationCheckpoint checkpoint = primaryShard.getLatestReplicationCheckpoint(); final PlainActionFuture res = PlainActionFuture.newFuture(); replicationSource = new RemoteStoreReplicationSource(primaryShard); - replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, Collections.emptyList(), primaryShard, res); + replicationSource.getSegmentFiles( + REPLICATION_ID, + checkpoint, + Collections.emptyList(), + primaryShard, + (fileName, bytesRecovered) -> {}, + res + ); GetSegmentFilesResponse response = res.get(); assert (response.files.isEmpty()); } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index d16a188f405e3..7b02635525264 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -53,6 +53,7 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.any; @@ -212,6 +213,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { Assert.fail("Should not be called"); @@ -277,6 +279,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); @@ -334,6 +337,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { 
Assert.fail("Unreachable"); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index a9d7d3cdd32fc..8b4b3aff701b4 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -40,6 +40,7 @@ import org.opensearch.index.store.StoreTests; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationFailedException; +import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.DummyShardLock; import org.opensearch.test.IndexSettingsModule; @@ -53,6 +54,7 @@ import java.util.List; import java.util.Map; import java.util.Random; +import java.util.function.BiConsumer; import org.mockito.Mockito; @@ -131,10 +133,12 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { assertEquals(1, filesToFetch.size()); assert (filesToFetch.contains(SEGMENT_FILE)); + filesToFetch.forEach(storeFileMetadata -> fileProgressTracker.accept(storeFileMetadata.name(), storeFileMetadata.length())); listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); } }; @@ -149,6 +153,19 @@ public void getSegmentFiles( public void onResponse(Void replicationResponse) { try { verify(spyIndexShard, times(1)).finalizeReplication(any()); + assertEquals( + 1, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + .filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); + assertEquals( + 0, + segrepTarget.state().getIndex().fileDetails().stream().filter(file -> file.fullyRecovered() == false).count() + ); segrepTarget.markAsDone(); } catch (IOException ex) { Assert.fail(); @@ -182,6 +199,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -200,6 +218,15 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { + assertEquals( + 0, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + .filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); assertEquals(exception, e.getCause().getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } @@ -225,6 +252,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onFailure(exception); @@ -243,6 +271,15 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { + assertEquals( + 0, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + .filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); assertEquals(exception, e.getCause().getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } @@ -268,6 +305,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new 
GetSegmentFilesResponse(filesToFetch)); @@ -314,6 +352,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -358,6 +397,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -376,6 +416,15 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { + assertEquals( + 0, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + .filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); assertTrue(e instanceof OpenSearchCorruptionException); assertTrue(e.getMessage().contains("has local copies of segments that differ from the primary")); segrepTarget.fail(new ReplicationFailedException(e), false); @@ -410,6 +459,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); diff --git a/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java b/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java index b29e25a0bff2c..bcd47e3d578ee 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java @@ -17,6 +17,7 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import java.util.List; +import java.util.function.BiConsumer; /** * This class is used by unit tests implementing SegmentReplicationSource */ @@ -36,6 +37,7 @@ public abstract void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 9dc230474482f..412d5235fe462 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -174,6 +174,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; @@ -1620,6 +1621,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { try ( From 3a36c2256e838fc0ce04b5c144257e3096ca29b7 Mon Sep 17 00:00:00 2001 From: Dhwanil Patel Date: Wed, 18 Oct 2023 20:46:20 +0530 Subject: [PATCH 036/178] Download functionality of global metadata from remote store (#10535) * Download functionality of global metadata from remote store Signed-off-by: Dhwanil Patel --- CHANGELOG.md | 1 + .../remote/RemoteClusterStateServiceIT.java | 4 +- .../remote/RemoteClusterStateService.java | 64 ++++++++-- .../recovery/RemoteStoreRestoreService.java | 3 +-
.../RemoteClusterStateServiceTests.java | 115 ++++++++++++++++-- 5 files changed, 168 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f9724033f540a..872cf022671cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) - [Remote cluster state] Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557)) - [Remote cluster state] Upload global metadata in cluster state to remote store([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +- [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java index 6fcc89cfe9e9a..7304304e522f8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java @@ -86,10 +86,10 @@ public void testFullClusterRestoreStaleDelete() throws Exception { assertEquals(10, repository.blobStore().blobContainer(baseMetadataPath.add("manifest")).listBlobsByPrefix("manifest").size()); - Map indexMetadataMap = remoteClusterStateService.getLatestIndexMetadata( + Map indexMetadataMap = remoteClusterStateService.getLatestMetadata( cluster().getClusterName(), getClusterState().metadata().clusterUUID() - ); + ).getIndices(); assertEquals(0, indexMetadataMap.values().stream().findFirst().get().getNumberOfReplicas()); assertEquals(shardCount, indexMetadataMap.values().stream().findFirst().get().getNumberOfShards()); } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 2092c2a0aac3f..358ce600a49d8 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -662,18 +662,18 @@ private BlobPath getManifestFolderPath(String clusterName, String clusterUUID) { * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster + * @param clusterMetadataManifest manifest file of cluster * @return {@code Map} latest IndexUUID to IndexMetadata map */ - public Map getLatestIndexMetadata(String clusterName, String clusterUUID) throws IOException { - start(); - Map remoteIndexMetadata = new HashMap<>(); - Optional clusterMetadataManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); - if (!clusterMetadataManifest.isPresent()) { - throw new IllegalStateException("Latest index metadata is not present for the provided clusterUUID"); - } - assert Objects.equals(clusterUUID, clusterMetadataManifest.get().getClusterUUID()) + private Map getIndexMetadataMap( + String clusterName, + String clusterUUID, + ClusterMetadataManifest clusterMetadataManifest + ) { + assert Objects.equals(clusterUUID, clusterMetadataManifest.getClusterUUID()) : 
"Corrupt ClusterMetadataManifest found. Cluster UUID mismatch."; - for (UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.get().getIndices()) { + Map remoteIndexMetadata = new HashMap<>(); + for (UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.getIndices()) { IndexMetadata indexMetadata = getIndexMetadata(clusterName, clusterUUID, uploadedIndexMetadata); remoteIndexMetadata.put(uploadedIndexMetadata.getIndexUUID(), indexMetadata); } @@ -704,6 +704,52 @@ private IndexMetadata getIndexMetadata(String clusterName, String clusterUUID, U } } + /** + * Fetch latest metadata from remote cluster state including global metadata and index metadata + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return {@link IndexMetadata} + */ + public Metadata getLatestMetadata(String clusterName, String clusterUUID) throws IOException { + start(); + Optional clusterMetadataManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + if (!clusterMetadataManifest.isPresent()) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Latest cluster metadata manifest is not present for the provided clusterUUID: %s", clusterUUID) + ); + } + // Fetch Global Metadata + Metadata globalMetadata = getGlobalMetadata(clusterName, clusterUUID, clusterMetadataManifest.get()); + + // Fetch Index Metadata + Map indices = getIndexMetadataMap(clusterName, clusterUUID, clusterMetadataManifest.get()); + + return Metadata.builder(globalMetadata).indices(indices).build(); + } + + private Metadata getGlobalMetadata(String clusterName, String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) { + String globalMetadataFileName = clusterMetadataManifest.getGlobalMetadataFileName(); + try { + // Fetch Global metadata + if (globalMetadataFileName != null) { + String[] splitPath = globalMetadataFileName.split("/"); + return GLOBAL_METADATA_FORMAT.read( + globalMetadataContainer(clusterName, clusterUUID), + splitPath[splitPath.length - 1], + blobStoreRepository.getNamedXContentRegistry() + ); + } else { + return Metadata.EMPTY_METADATA; + } + } catch (IOException e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Error while downloading Global Metadata - %s", globalMetadataFileName), + e + ); + } + } + /** * Fetch latest ClusterMetadataManifest from remote state store * diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java index 94fd08b99ac58..dec999e43110f 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -141,7 +141,8 @@ public RemoteRestoreResult restore( || restoreClusterUUID.isBlank()) == false; if (metadataFromRemoteStore) { try { - remoteClusterStateService.getLatestIndexMetadata(currentState.getClusterName().value(), restoreClusterUUID) + remoteClusterStateService.getLatestMetadata(currentState.getClusterName().value(), restoreClusterUUID) + .getIndices() .values() .forEach(indexMetadata -> { indexMetadataMap.put(indexMetadata.getIndex().getName(), new Tuple<>(true, indexMetadata)); diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index ddee3bcf10382..49b7f0ff8d1a9 100644 --- 
a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -12,6 +12,7 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.metadata.IndexGraveyard; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; @@ -28,6 +29,7 @@ import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; @@ -634,7 +636,8 @@ public void testReadLatestMetadataManifestSuccessButNoIndexMetadata() throws IOE remoteClusterStateService.start(); assertEquals( - remoteClusterStateService.getLatestIndexMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) + remoteClusterStateService.getLatestMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) + .getIndices() .size(), 0 ); @@ -662,10 +665,8 @@ public void testReadLatestMetadataManifestSuccessButIndexMetadataFetchIOExceptio remoteClusterStateService.start(); Exception e = assertThrows( IllegalStateException.class, - () -> remoteClusterStateService.getLatestIndexMetadata( - clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID() - ) + () -> remoteClusterStateService.getLatestMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) + .getIndices() ); assertEquals(e.getMessage(), "Error while downloading IndexMetadata - " + uploadedIndexMetadata.getUploadedFilename()); } @@ -704,6 +705,70 @@ public void testReadLatestMetadataManifestSuccess() throws IOException { assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); } + public void testReadGlobalMetadata() throws IOException { + when(blobStoreRepository.getNamedXContentRegistry()).thenReturn(new NamedXContentRegistry( + List.of(new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(IndexGraveyard.TYPE), IndexGraveyard::fromXContent)))); + final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + remoteClusterStateService.start(); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(List.of()) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .codecVersion(MANIFEST_CURRENT_CODEC_VERSION) + .globalMetadataFileName("global-metadata-file") + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") + .build(); + + Metadata expectedMetadata = Metadata.builder().persistentSettings(Settings.builder().put("readonly", true).build()).build(); + mockBlobContainerForGlobalMetadata(mockBlobStoreObjects(), expectedManifest, expectedMetadata); + + Metadata metadata = remoteClusterStateService.getLatestMetadata( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + + assertTrue(Metadata.isGlobalStateEquals(metadata, expectedMetadata)); + } + + public void
testReadGlobalMetadataIOException() throws IOException { + final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + remoteClusterStateService.start(); + String globalIndexMetadataName = "global-metadata-file"; + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(List.of()) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .codecVersion(MANIFEST_CURRENT_CODEC_VERSION) + .globalMetadataFileName(globalIndexMetadataName) + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") + .build(); + + Metadata expectedMetadata = Metadata.builder().persistentSettings(Settings.builder().put("readonly", true).build()).build(); + + BlobContainer blobContainer = mockBlobStoreObjects(); + mockBlobContainerForGlobalMetadata(blobContainer, expectedManifest, expectedMetadata); + + when(blobContainer.readBlob(RemoteClusterStateService.GLOBAL_METADATA_FORMAT.blobName(globalIndexMetadataName))).thenThrow( + FileNotFoundException.class + ); + + remoteClusterStateService.start(); + Exception e = assertThrows( + IllegalStateException.class, + () -> remoteClusterStateService.getLatestMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) + ); + assertEquals(e.getMessage(), "Error while downloading Global Metadata - " + globalIndexMetadataName); + } + public void testReadLatestIndexMetadataSuccess() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); remoteClusterStateService.start(); @@ -730,15 +795,16 @@ public void testReadLatestIndexMetadataSuccess() throws IOException { .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) .previousClusterUUID("prev-cluster-uuid") + .globalMetadataFileName("global-metadata-file") .codecVersion(ClusterMetadataManifest.CODEC_V0) .build(); mockBlobContainer(mockBlobStoreObjects(), expectedManifest, Map.of(index.getUUID(), indexMetadata)); - Map indexMetadataMap = remoteClusterStateService.getLatestIndexMetadata( + Map indexMetadataMap = remoteClusterStateService.getLatestMetadata( clusterState.getClusterName().value(), clusterState.metadata().clusterUUID() - ); + ).getIndices(); assertEquals(indexMetadataMap.size(), 1); assertEquals(indexMetadataMap.get(index.getUUID()).getIndex().getName(), index.getName()); @@ -1114,6 +1180,41 @@ private void mockBlobContainer( }); } + private void mockBlobContainerForGlobalMetadata( + BlobContainer blobContainer, + ClusterMetadataManifest clusterMetadataManifest, + Metadata metadata + ) throws IOException { + String mockManifestFileName = "manifest__1__2__C__456__1"; + BlobMetadata blobMetadata = new PlainBlobMetadata(mockManifestFileName, 1); + when( + blobContainer.listBlobsByPrefixInSortedOrder( + "manifest" + RemoteClusterStateService.DELIMITER, + 1, + BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC + ) + ).thenReturn(Arrays.asList(blobMetadata)); + + BytesReference bytes = RemoteClusterStateService.CLUSTER_METADATA_MANIFEST_FORMAT.serialize( + clusterMetadataManifest, + mockManifestFileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + when(blobContainer.readBlob(mockManifestFileName)).thenReturn(new ByteArrayInputStream(bytes.streamInput().readAllBytes())); + + BytesReference bytesGlobalMetadata =
RemoteClusterStateService.GLOBAL_METADATA_FORMAT.serialize( + metadata, + "global-metadata-file", + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + String[] splitPath = clusterMetadataManifest.getGlobalMetadataFileName().split("/"); + when(blobContainer.readBlob(RemoteClusterStateService.GLOBAL_METADATA_FORMAT.blobName(splitPath[splitPath.length - 1]))).thenReturn( + new ByteArrayInputStream(bytesGlobalMetadata.streamInput().readAllBytes()) + ); + } + private static ClusterState.Builder generateClusterStateWithGlobalMetadata() { final Settings clusterSettings = Settings.builder().put("cluster.blocks.read_only", true).build(); final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); From 56736be1adaa0c7e97d8db159ff6dca6f6814a9e Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Wed, 18 Oct 2023 23:11:53 +0530 Subject: [PATCH 037/178] Remote Restore Cluster Metadata if local disk state lost after quorum loss recovery (#10447) * Remote Restore Cluster Metadata if local disk state lost after quorum loss Signed-off-by: bansvaru --- CHANGELOG.md | 4 + .../remotestore/BaseRemoteStoreRestoreIT.java | 8 +- .../RemoteStoreClusterStateRestoreIT.java | 137 ++++++++++++++++++ .../opensearch/gateway/GatewayMetaState.java | 58 ++++---- .../remote/RemoteClusterStateService.java | 7 +- .../recovery/RemoteStoreRestoreService.java | 58 ++++++-- .../GatewayMetaStatePersistedStateTests.java | 83 ++++++++++- .../gateway/MockGatewayMetaState.java | 25 +++- 8 files changed, 330 insertions(+), 50 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 872cf022671cb..f4332c7847db6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) - [Remote cluster state] Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557)) - [Remote cluster state] Upload global metadata in cluster state to remote store([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +<<<<<<< Updated upstream - [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535)) +======= +- [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +>>>>>>> Stashed changes ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java index ad3e99dd274ce..b8481610869e6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java @@ -58,7 +58,7 @@ protected void restore(String... 
indices) { ); } - protected void verifyRestoredData(Map indexStats, String indexName) throws Exception { + protected void verifyRestoredData(Map indexStats, String indexName, boolean indexMoreData) throws Exception { ensureYellowAndNoInitializingShards(indexName); ensureGreen(indexName); // This is to ensure that shards that were already assigned will get latest count @@ -68,6 +68,8 @@ protected void verifyRestoredData(Map indexStats, String indexName 30, TimeUnit.SECONDS ); + if (indexMoreData == false) return; + IndexResponse response = indexSingleDoc(indexName); if (indexStats.containsKey(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id())) { assertEquals(indexStats.get(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id()) + 1, response.getSeqNo()); @@ -80,6 +82,10 @@ protected void verifyRestoredData(Map indexStats, String indexName ); } + protected void verifyRestoredData(Map indexStats, String indexName) throws Exception { + verifyRestoredData(indexStats, indexName, true); + } + public void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) { prepareCluster(numClusterManagerNodes, numDataOnlyNodes, indices, replicaCount, shardCount, Settings.EMPTY); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index 3df4cc4e34d93..3a3e293de9b13 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -8,9 +8,15 @@ package org.opensearch.remotestore; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexTemplateMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.gateway.remote.ClusterMetadataManifest; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; @@ -19,11 +25,19 @@ import java.io.IOException; import java.nio.file.Files; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.concurrent.ExecutionException; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_READ_ONLY_SETTING; +import static org.opensearch.cluster.metadata.Metadata.CLUSTER_READ_ONLY_BLOCK; +import static org.opensearch.cluster.metadata.Metadata.SETTING_READ_ONLY_SETTING; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreClusterStateRestoreIT 
extends BaseRemoteStoreRestoreIT { @@ -86,6 +100,7 @@ public void testFullClusterRestoreMultipleIndices() throws Exception { Map indexStats2 = indexData(1, false, secondIndexName); assertEquals((shardCount + 1) * (replicaCount + 1), getNumShards(secondIndexName).totalNumShards); ensureGreen(secondIndexName); + updateIndexBlock(true, secondIndexName); String prevClusterUUID = clusterService().state().metadata().clusterUUID(); @@ -98,6 +113,13 @@ public void testFullClusterRestoreMultipleIndices() throws Exception { // Step - 3 Trigger full cluster restore validateMetadata(List.of(INDEX_NAME, secondIndexName)); verifyRestoredData(indexStats, INDEX_NAME); + verifyRestoredData(indexStats2, secondIndexName, false); + assertTrue(INDEX_READ_ONLY_SETTING.get(clusterService().state().metadata().index(secondIndexName).getSettings())); + assertThrows(ClusterBlockException.class, () -> indexSingleDoc(secondIndexName)); + // Test is complete + + // Remove the block to ensure proper cleanup + updateIndexBlock(false, secondIndexName); } public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathThrowsException() throws Exception { @@ -192,4 +214,119 @@ private void validateCurrentMetadata() throws Exception { } }); } + + public void testFullClusterRestoreGlobalMetadata() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // Step - 1 index some data to generate files in remote directory + Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + + // Create global metadata - register a custom repo + // TODO - uncomment after all customs is also uploaded for all repos - https://github.com/opensearch-project/OpenSearch/issues/10691 + // registerCustomRepository(); + + // Create global metadata - persistent settings + updatePersistentSettings(Settings.builder().put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), 34).build()); + + // Create global metadata - index template + putIndexTemplate(); + + // Create global metadata - Put cluster block + addClusterLevelReadOnlyBlock(); + + // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata + resetCluster(dataNodeCount, clusterManagerNodeCount); + + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. 
cluster uuid is same"; + + // Step - 3 Trigger full cluster restore and validate + // validateCurrentMetadata(); + verifyRestoredData(indexStats, INDEX_NAME, false); + + // validate global metadata restored + verifyRestoredRepositories(); + verifyRestoredIndexTemplate(); + assertEquals(Integer.valueOf(34), SETTING_CLUSTER_MAX_SHARDS_PER_NODE.get(clusterService().state().metadata().settings())); + assertEquals(true, SETTING_READ_ONLY_SETTING.get(clusterService().state().metadata().settings())); + assertTrue(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK)); + // Test is complete + + // Remote the cluster read only block to ensure proper cleanup + updatePersistentSettings(Settings.builder().put(SETTING_READ_ONLY_SETTING.getKey(), false).build()); + assertFalse(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK)); + } + + private void registerCustomRepository() { + assertAcked( + client().admin() + .cluster() + .preparePutRepository("custom-repo") + .setType("fs") + .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", false)) + .get() + ); + } + + private void verifyRestoredRepositories() { + RepositoriesMetadata repositoriesMetadata = clusterService().state().metadata().custom(RepositoriesMetadata.TYPE); + assertEquals(2, repositoriesMetadata.repositories().size()); // includes remote store repo as well + assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_NAME).settings())); + assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_2_NAME).settings())); + // TODO - uncomment after all customs is also uploaded for all repos - https://github.com/opensearch-project/OpenSearch/issues/10691 + // assertEquals("fs", repositoriesMetadata.repository("custom-repo").type()); + // assertEquals(Settings.builder().put("location", randomRepoPath()).put("compress", false).build(), + // repositoriesMetadata.repository("custom-repo").settings()); + } + + private void addClusterLevelReadOnlyBlock() throws InterruptedException, ExecutionException { + updatePersistentSettings(Settings.builder().put(SETTING_READ_ONLY_SETTING.getKey(), true).build()); + assertTrue(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK)); + } + + private void updatePersistentSettings(Settings settings) throws ExecutionException, InterruptedException { + ClusterUpdateSettingsRequest resetRequest = new ClusterUpdateSettingsRequest(); + resetRequest.persistentSettings(settings); + assertAcked(client().admin().cluster().updateSettings(resetRequest).get()); + } + + private void verifyRestoredIndexTemplate() { + Map indexTemplateMetadataMap = clusterService().state().metadata().templates(); + assertEquals(1, indexTemplateMetadataMap.size()); + assertEquals(Arrays.asList("pattern-1", "log-*"), indexTemplateMetadataMap.get("my-template").patterns()); + assertEquals( + Settings.builder() // <1> + .put("index.number_of_shards", 3) + .put("index.number_of_replicas", 1) + .build(), + indexTemplateMetadataMap.get("my-template").settings() + ); + } + + private static void putIndexTemplate() { + PutIndexTemplateRequest request = new PutIndexTemplateRequest("my-template"); // <1> + request.patterns(Arrays.asList("pattern-1", "log-*")); // <2> + + request.settings( + Settings.builder() // <1> + .put("index.number_of_shards", 3) + .put("index.number_of_replicas", 1) + ); + assertTrue(client().admin().indices().putTemplate(request).actionGet().isAcknowledged()); + } + + private static void 
updateIndexBlock(boolean value, String secondIndexName) throws InterruptedException, ExecutionException { + assertAcked( + client().admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(Settings.builder().put(INDEX_READ_ONLY_SETTING.getKey(), value).build(), secondIndexName) + ) + .get() + ); + } } diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index d3e7a0c482ee2..9eb7fb0ca04d0 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -158,38 +158,44 @@ public void start( PersistedState remotePersistedState = null; boolean success = false; try { - ClusterState clusterState = prepareInitialClusterState( + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) + .version(lastAcceptedVersion) + .metadata(metadata) + .build(); + + if (DiscoveryNode.isClusterManagerNode(settings) && isRemoteStoreClusterStateEnabled(settings)) { + // If the cluster UUID loaded from local is unknown (_na_) then fetch the best state from remote + // If there is no valid state on remote, continue with initial empty state + // If there is a valid state, then restore index metadata using this state + String lastKnownClusterUUID = ClusterState.UNKNOWN_UUID; + if (ClusterState.UNKNOWN_UUID.equals(clusterState.metadata().clusterUUID())) { + lastKnownClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote( + clusterState.getClusterName().value() + ); + if (ClusterState.UNKNOWN_UUID.equals(lastKnownClusterUUID) == false) { + // Load state from remote + final RemoteRestoreResult remoteRestoreResult = remoteStoreRestoreService.restore( + clusterState, + lastKnownClusterUUID, + false, + new String[] {} + ); + clusterState = remoteRestoreResult.getClusterState(); + } + } + remotePersistedState = new RemotePersistedState(remoteClusterStateService, lastKnownClusterUUID); + } + + // Recovers Cluster and Index level blocks + clusterState = prepareInitialClusterState( transportService, clusterService, - ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) - .version(lastAcceptedVersion) - .metadata(upgradeMetadataForNode(metadata, metadataIndexUpgradeService, metadataUpgrader)) + ClusterState.builder(clusterState) + .metadata(upgradeMetadataForNode(clusterState.metadata(), metadataIndexUpgradeService, metadataUpgrader)) .build() ); if (DiscoveryNode.isClusterManagerNode(settings)) { - if (isRemoteStoreClusterStateEnabled(settings)) { - // If the cluster UUID loaded from local is unknown (_na_) then fetch the best state from remote - // If there is no valid state on remote, continue with initial empty state - // If there is a valid state, then restore index metadata using this state - String lastKnownClusterUUID = ClusterState.UNKNOWN_UUID; - if (ClusterState.UNKNOWN_UUID.equals(clusterState.metadata().clusterUUID())) { - lastKnownClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote( - clusterState.getClusterName().value() - ); - if (ClusterState.UNKNOWN_UUID.equals(lastKnownClusterUUID) == false) { - // Load state from remote - final RemoteRestoreResult remoteRestoreResult = remoteStoreRestoreService.restore( - clusterState, - lastKnownClusterUUID, - false, - new String[] {} - ); - clusterState = remoteRestoreResult.getClusterState(); - } - } - remotePersistedState = new RemotePersistedState(remoteClusterStateService, lastKnownClusterUUID); - 
} persistedState = new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState); } else { persistedState = new AsyncLucenePersistedState( diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 358ce600a49d8..b9d06c8fbb1c1 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -711,7 +711,7 @@ private IndexMetadata getIndexMetadata(String clusterName, String clusterUUID, U * @param clusterName name of the cluster * @return {@link IndexMetadata} */ - public Metadata getLatestMetadata(String clusterName, String clusterUUID) throws IOException { + public Metadata getLatestMetadata(String clusterName, String clusterUUID) { start(); Optional clusterMetadataManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); if (!clusterMetadataManifest.isPresent()) { @@ -759,10 +759,7 @@ private Metadata getGlobalMetadata(String clusterName, String clusterUUID, Clust */ public Optional getLatestClusterMetadataManifest(String clusterName, String clusterUUID) { Optional latestManifestFileName = getLatestManifestFileName(clusterName, clusterUUID); - if (latestManifestFileName.isPresent()) { - return Optional.of(fetchRemoteClusterMetadataManifest(clusterName, clusterUUID, latestManifestFileName.get())); - } - return Optional.empty(); + return latestManifestFileName.map(s -> fetchRemoteClusterMetadataManifest(clusterName, clusterUUID, s)); } /** diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java index dec999e43110f..ac9cf35d1d8e5 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -10,15 +10,16 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.Version; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexTemplateMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.MetadataCreateIndexService; import org.opensearch.cluster.metadata.MetadataIndexUpgradeService; +import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; @@ -27,6 +28,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; @@ -42,11 +44,13 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; import static 
org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; /** * Service responsible for restoring index data from remote store @@ -136,17 +140,16 @@ public RemoteRestoreResult restore( String[] indexNames ) { Map> indexMetadataMap = new HashMap<>(); + Metadata remoteMetadata = null; boolean metadataFromRemoteStore = (restoreClusterUUID == null || restoreClusterUUID.isEmpty() || restoreClusterUUID.isBlank()) == false; if (metadataFromRemoteStore) { try { - remoteClusterStateService.getLatestMetadata(currentState.getClusterName().value(), restoreClusterUUID) - .getIndices() - .values() - .forEach(indexMetadata -> { - indexMetadataMap.put(indexMetadata.getIndex().getName(), new Tuple<>(true, indexMetadata)); - }); + remoteMetadata = remoteClusterStateService.getLatestMetadata(currentState.getClusterName().value(), restoreClusterUUID); + remoteMetadata.getIndices().values().forEach(indexMetadata -> { + indexMetadataMap.put(indexMetadata.getIndex().getName(), new Tuple<>(true, indexMetadata)); + }); } catch (Exception e) { throw new IllegalStateException("Unable to restore remote index metadata", e); } @@ -161,7 +164,7 @@ public RemoteRestoreResult restore( } } validate(currentState, indexMetadataMap, restoreClusterUUID, restoreAllShards); - return executeRestore(currentState, indexMetadataMap, restoreAllShards); + return executeRestore(currentState, indexMetadataMap, restoreAllShards, remoteMetadata); } /** @@ -174,7 +177,8 @@ public RemoteRestoreResult restore( private RemoteRestoreResult executeRestore( ClusterState currentState, Map> indexMetadataMap, - boolean restoreAllShards + boolean restoreAllShards, + Metadata remoteMetadata ) { final String restoreUUID = UUIDs.randomBase64UUID(); List indicesToBeRestored = new ArrayList<>(); @@ -227,6 +231,10 @@ private RemoteRestoreResult executeRestore( totalShards += updatedIndexMetadata.getNumberOfShards(); } + if (remoteMetadata != null) { + restoreGlobalMetadata(mdBuilder, remoteMetadata); + } + RestoreInfo restoreInfo = new RestoreInfo("remote_store", indicesToBeRestored, totalShards, totalShards); RoutingTable rt = rtBuilder.build(); @@ -234,6 +242,36 @@ private RemoteRestoreResult executeRestore( return RemoteRestoreResult.build(restoreUUID, restoreInfo, allocationService.reroute(updatedState, "restored from remote store")); } + private void restoreGlobalMetadata(Metadata.Builder mdBuilder, Metadata remoteMetadata) { + if (remoteMetadata.persistentSettings() != null) { + Settings settings = remoteMetadata.persistentSettings(); + clusterService.getClusterSettings().validateUpdate(settings); + mdBuilder.persistentSettings(settings); + } + if (remoteMetadata.templates() != null) { + for (final IndexTemplateMetadata cursor : remoteMetadata.templates().values()) { + mdBuilder.put(cursor); + } + } + if (remoteMetadata.customs() != null) { + for (final Map.Entry cursor : remoteMetadata.customs().entrySet()) { + if (RepositoriesMetadata.TYPE.equals(cursor.getKey()) == false) { + mdBuilder.putCustom(cursor.getKey(), cursor.getValue()); + } + } + } + Optional repositoriesMetadata = Optional.ofNullable(remoteMetadata.custom(RepositoriesMetadata.TYPE)); + repositoriesMetadata = repositoriesMetadata.map( + repositoriesMetadata1 -> new RepositoriesMetadata( + repositoriesMetadata1.repositories() + .stream() + .filter(repository -> SYSTEM_REPOSITORY_SETTING.get(repository.settings()) == false) + .collect(Collectors.toList()) + ) + ); + 
repositoriesMetadata.ifPresent(metadata -> mdBuilder.putCustom(RepositoriesMetadata.TYPE, metadata)); + } + /** * Performs various validations needed before executing restore * @param currentState current cluster state @@ -298,8 +336,6 @@ private void validate( throw new IllegalStateException(finalErrorMsg); } - Version minIndexCompatibilityVersion = currentState.getNodes().getMaxNodeVersion().minimumIndexCompatibilityVersion(); - metadataIndexUpgradeService.upgradeIndexMetadata(indexMetadata, minIndexCompatibilityVersion); boolean isHidden = IndexMetadata.INDEX_HIDDEN_SETTING.get(indexMetadata.getSettings()); createIndexService.validateIndexName(indexName, currentState); createIndexService.validateDotIndex(indexName, isHidden); diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java index c7ed1cb732154..1d5c2a0f01b5c 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -840,7 +840,8 @@ public void testGatewayForRemoteStateForInitialBootstrap() throws IOException { remoteClusterStateService, remoteStoreRestoreService, persistedStateRegistry, - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + false ); final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); @@ -886,7 +887,8 @@ public void testGatewayForRemoteStateForNodeReplacement() throws IOException { remoteClusterStateService, remoteStoreRestoreService, persistedStateRegistry, - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + false ); final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); @@ -918,7 +920,13 @@ public void testGatewayForRemoteStateForNodeReboot() throws IOException { .clusterUUID(randomAlphaOfLength(10)) .build() ); - gateway = newGatewayForRemoteState(remoteClusterStateService, remoteStoreRestoreService, persistedStateRegistry, clusterState); + gateway = newGatewayForRemoteState( + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + clusterState, + false + ); final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); verifyNoInteractions(remoteClusterStateService); @@ -933,13 +941,77 @@ public void testGatewayForRemoteStateForNodeReboot() throws IOException { } } + public void testGatewayForRemoteStateForInitialBootstrapBlocksApplied() throws IOException { + MockGatewayMetaState gateway = null; + try { + final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getLastKnownUUIDFromRemote(clusterName.value())).thenReturn("test-cluster-uuid"); + + final IndexMetadata indexMetadata = IndexMetadata.builder("test-index1") + .settings( + settings(Version.CURRENT).put(SETTING_INDEX_UUID, randomAlphaOfLength(10)) + .put(IndexMetadata.INDEX_READ_ONLY_SETTING.getKey(), true) + ) + .numberOfShards(5) + .numberOfReplicas(1) + .build(); + + final ClusterState clusterState = ClusterState.builder( + createClusterState( + 
randomNonNegativeLong(), + Metadata.builder() + .coordinationMetadata(CoordinationMetadata.builder().term(randomLong()).build()) + .put(indexMetadata, false) + .clusterUUID(ClusterState.UNKNOWN_UUID) + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build() + ) + ).nodes(DiscoveryNodes.EMPTY_NODES).build(); + + final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn( + RemoteRestoreResult.build("test-cluster-uuid", null, clusterState) + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + gateway = newGatewayForRemoteState( + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + ClusterState.EMPTY_STATE, + true + ); + PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); + PersistedState lucenePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL); + verify(remoteClusterStateService).getLastKnownUUIDFromRemote(clusterName.value()); // change this + verify(remoteStoreRestoreService).restore(any(ClusterState.class), any(String.class), anyBoolean(), any(String[].class)); + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + assertThat( + Metadata.isGlobalStateEquals(lucenePersistedState.getLastAcceptedState().metadata(), clusterState.metadata()), + equalTo(true) + ); + assertThat( + lucenePersistedState.getLastAcceptedState().blocks().hasGlobalBlock(Metadata.CLUSTER_READ_ONLY_BLOCK), + equalTo(true) + ); + assertThat( + IndexMetadata.INDEX_READ_ONLY_SETTING.get( + lucenePersistedState.getLastAcceptedState().metadata().index("test-index1").getSettings() + ), + equalTo(true) + ); + } finally { + IOUtils.close(gateway); + } + } + private MockGatewayMetaState newGatewayForRemoteState( RemoteClusterStateService remoteClusterStateService, RemoteStoreRestoreService remoteStoreRestoreService, PersistedStateRegistry persistedStateRegistry, - ClusterState currentState + ClusterState currentState, + boolean prepareFullState ) throws IOException { - MockGatewayMetaState gateway = new MockGatewayMetaState(localNode, bigArrays); + MockGatewayMetaState gateway = new MockGatewayMetaState(localNode, bigArrays, prepareFullState); String randomRepoName = "randomRepoName"; String stateRepoTypeAttributeKey = String.format( Locale.getDefault(), @@ -963,6 +1035,7 @@ private MockGatewayMetaState newGatewayForRemoteState( when(clusterService.getClusterSettings()).thenReturn( new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); + when(transportService.getLocalNode()).thenReturn(mock(DiscoveryNode.class)); final PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService( nodeEnvironment, xContentRegistry(), diff --git a/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java b/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java index d77596cf5cdd1..2f006a5519d69 100644 --- a/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java +++ b/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java @@ -67,6 +67,12 @@ public class MockGatewayMetaState extends GatewayMetaState { private final BigArrays bigArrays; private final RemoteClusterStateService remoteClusterStateService; private final RemoteStoreRestoreService 
remoteStoreRestoreService; + private boolean prepareFullState = false; + + public MockGatewayMetaState(DiscoveryNode localNode, BigArrays bigArrays, boolean prepareFullState) { + this(localNode, bigArrays); + this.prepareFullState = prepareFullState; + } public MockGatewayMetaState(DiscoveryNode localNode, BigArrays bigArrays) { this.localNode = localNode; @@ -99,8 +105,12 @@ Metadata upgradeMetadataForNode( @Override ClusterState prepareInitialClusterState(TransportService transportService, ClusterService clusterService, ClusterState clusterState) { - // Just set localNode here, not to mess with ClusterService and IndicesService mocking - return ClusterStateUpdaters.setLocalNode(clusterState, localNode); + if (prepareFullState) { + return super.prepareInitialClusterState(transportService, clusterService, clusterState); + } else { + // Just set localNode here, not to mess with ClusterService and IndicesService mocking + return ClusterStateUpdaters.setLocalNode(clusterState, localNode); + } } @Override @@ -113,6 +123,16 @@ public void start( NodeEnvironment nodeEnvironment, NamedXContentRegistry xContentRegistry, PersistedStateRegistry persistedStateRegistry + ) { + start(settings, nodeEnvironment, xContentRegistry, persistedStateRegistry, false); + } + + public void start( + Settings settings, + NodeEnvironment nodeEnvironment, + NamedXContentRegistry xContentRegistry, + PersistedStateRegistry persistedStateRegistry, + boolean prepareFullState ) { final TransportService transportService = mock(TransportService.class); when(transportService.getThreadPool()).thenReturn(mock(ThreadPool.class)); @@ -126,6 +146,7 @@ public void start( } catch (IOException e) { throw new AssertionError(e); } + this.prepareFullState = prepareFullState; start( settings, transportService, From 64539d8ad7fe299e867ffb41b30513f8cbe50378 Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Wed, 18 Oct 2023 23:23:10 +0530 Subject: [PATCH 038/178] fix change log conflicts (#10713) Signed-off-by: bansvaru --- CHANGELOG.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f4332c7847db6..0ad18b94f31b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,11 +16,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) - [Remote cluster state] Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557)) - [Remote cluster state] Upload global metadata in cluster state to remote store([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) -<<<<<<< Updated upstream - [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535)) -======= - [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) ->>>>>>> Stashed changes ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 From 267bd5a84dfc62b265c053e5cd3abf31b9e01ff5 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Wed, 18 Oct 2023 23:45:36 +0530 Subject: [PATCH 039/178] Version fixes for Resource Usage Stats and FS Stats (#10712) Signed-off-by: Gaurav Bafna --- .../action/admin/cluster/node/stats/NodeStats.java | 4 ++-- 
.../src/main/java/org/opensearch/monitor/fs/FsInfo.java | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index 6ce6ca40cbce4..0c8aa027e5f01 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -206,7 +206,7 @@ public NodeStats(StreamInput in) throws IOException { } else { searchPipelineStats = null; } - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { // make it 2.12 when we backport + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { resourceUsageStats = in.readOptionalWriteable(NodesResourceUsageStats::new); } else { resourceUsageStats = null; @@ -462,7 +462,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_9_0)) { out.writeOptionalWriteable(searchPipelineStats); } - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { // make it 2.12 when we backport + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(resourceUsageStats); } if (out.getVersion().onOrAfter(Version.V_3_0_0)) { diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java index 4e2e9f280d765..8446ab0dd6166 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java @@ -335,7 +335,7 @@ public DeviceStats(StreamInput in) throws IOException { previousSectorsRead = in.readLong(); currentSectorsWritten = in.readLong(); previousSectorsWritten = in.readLong(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { currentReadTime = in.readLong(); previousReadTime = in.readLong(); currentWriteTime = in.readLong(); @@ -369,7 +369,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(previousSectorsRead); out.writeLong(currentSectorsWritten); out.writeLong(previousSectorsWritten); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeLong(currentReadTime); out.writeLong(previousReadTime); out.writeLong(currentWriteTime); @@ -533,7 +533,7 @@ public IoStats(StreamInput in) throws IOException { this.totalWriteOperations = in.readLong(); this.totalReadKilobytes = in.readLong(); this.totalWriteKilobytes = in.readLong(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { this.totalReadTime = in.readLong(); this.totalWriteTime = in.readLong(); this.totalQueueSize = in.readLong(); @@ -557,7 +557,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(totalWriteOperations); out.writeLong(totalReadKilobytes); out.writeLong(totalWriteKilobytes); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeLong(totalReadTime); out.writeLong(totalWriteTime); out.writeLong(totalQueueSize); From 5ec2fe9fb1b215be0da604f4f7e91a75d15f5f87 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Wed, 18 Oct 2023 12:09:01 -0700 Subject: [PATCH 040/178] Add missing entry for 2.11 release notes (#10679) Signed-off-by: Kunal Kotwani --- release-notes/opensearch.release-notes-2.11.0.md | 1 + 1 file changed, 1 insertion(+) diff --git 
a/release-notes/opensearch.release-notes-2.11.0.md b/release-notes/opensearch.release-notes-2.11.0.md index 7ebf1b433c7c6..040cc053469ed 100644 --- a/release-notes/opensearch.release-notes-2.11.0.md +++ b/release-notes/opensearch.release-notes-2.11.0.md @@ -5,6 +5,7 @@ ### Added - Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386)) - Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681)) +- Add parallel file download support for remote store based replication ([#8596](https://github.com/opensearch-project/OpenSearch/pull/8596)) - Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) - [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666)) - Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) From a3c1d505903361a757945400c440aeac9e7ad973 Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Wed, 18 Oct 2023 18:53:01 -0700 Subject: [PATCH 041/178] Add @abbashus and @adnapibar back to CODEOWNERS (#10681) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 4fa118e8486f1..8076adcf00ca9 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1 @@ -* @reta @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @gbbafna @setiah @kartg @kotwanikunal @mch2 @nknize @owaiskazi19 @peternied @Rishikesh1159 @ryanbogan @saratvemulapalli @shwetathareja @dreamer-89 @tlfeng @VachaShah @dbwiddis @sachinpkale @sohami @msfroh +* @abbashus @adnapibar @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kartg @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @ryanbogan @sachinpkale @saratvemulapalli @setiah @shwetathareja @sohami @tlfeng @VachaShah From 7936f94ef6692516c58f4c46a8a3747da9cd9f1d Mon Sep 17 00:00:00 2001 From: Vikas Bansal <43470111+vikasvb90@users.noreply.github.com> Date: Thu, 19 Oct 2023 08:40:13 +0530 Subject: [PATCH 042/178] Added close on buffered stream in s3 async upload for additional cleanup (#10710) Signed-off-by: vikasvb90 --- .../s3/async/AsyncPartsHandler.java | 44 +++++++++++------- .../s3/async/AsyncTransferManager.java | 20 +++++--- .../s3/async/AsyncTransferManagerTests.java | 46 ++++++++++++++----- 3 files changed, 75 insertions(+), 35 deletions(-) diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java index 86bb70e5a40a2..6007d9f9c8a1c 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java @@ -29,6 +29,7 @@ import java.io.BufferedInputStream; import java.io.IOException; +import java.io.InputStream; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; @@ -140,28 +141,39 @@ private static void uploadPart( ExecutorService streamReadExecutor = uploadRequest.getWritePriority() == WritePriority.HIGH ? 
priorityExecutorService : executorService; + // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered + // data can be retried instead of retrying whole file by the application. + InputStream inputStream = new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)); CompletableFuture uploadPartResponseFuture = SocketAccess.doPrivileged( () -> s3AsyncClient.uploadPart( uploadPartRequest, - AsyncRequestBody.fromInputStream( - // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered - // data can be retried instead of retrying whole file by the application. - new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)), - inputStreamContainer.getContentLength(), - streamReadExecutor - ) + AsyncRequestBody.fromInputStream(inputStream, inputStreamContainer.getContentLength(), streamReadExecutor) ) ); - CompletableFuture convertFuture = uploadPartResponseFuture.thenApply( - uploadPartResponse -> convertUploadPartResponse( - completedParts, - inputStreamContainers, - uploadPartResponse, - partNumber, - uploadRequest.doRemoteDataIntegrityCheck() - ) - ); + CompletableFuture convertFuture = uploadPartResponseFuture.whenComplete((resp, throwable) -> { + try { + inputStream.close(); + } catch (IOException ex) { + log.error( + () -> new ParameterizedMessage( + "Failed to close stream while uploading a part of idx {} and file {}.", + uploadPartRequest.partNumber(), + uploadPartRequest.key() + ), + ex + ); + } + }) + .thenApply( + uploadPartResponse -> convertUploadPartResponse( + completedParts, + inputStreamContainers, + uploadPartResponse, + partNumber, + uploadRequest.doRemoteDataIntegrityCheck() + ) + ); futures.add(convertFuture); CompletableFutureUtils.forwardExceptionTo(convertFuture, uploadPartResponseFuture); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java index db04636b89d50..a52745e33073e 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java @@ -40,6 +40,7 @@ import java.io.BufferedInputStream; import java.io.IOException; +import java.io.InputStream; import java.util.Arrays; import java.util.Base64; import java.util.List; @@ -310,17 +311,22 @@ private void uploadInOneChunk( ExecutorService streamReadExecutor = uploadRequest.getWritePriority() == WritePriority.HIGH ? priorityExecutorService : executorService; + // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered + // data can be retried instead of retrying whole file by the application. + InputStream inputStream = new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)); CompletableFuture putObjectFuture = SocketAccess.doPrivileged( () -> s3AsyncClient.putObject( putObjectRequestBuilder.build(), - AsyncRequestBody.fromInputStream( - // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered - // data can be retried instead of retrying whole file by the application. 
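As an aside, a minimal, hypothetical sketch of the mark/reset retry contract the comment above relies on; uploadPartWithRetry and sendBytes are invented for illustration and are not the plugin's or SDK's API:

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;

final class RetryableUploadSketch {
    static void uploadPartWithRetry(InputStream partStream, int partSize) throws IOException {
        InputStream in = new BufferedInputStream(partStream, partSize + 1);
        in.mark(partSize + 1);           // remember the start of this part
        try {
            sendBytes(in);               // hypothetical network write that may fail
        } catch (IOException retryable) {
            in.reset();                  // rewind to the mark: only this part is re-read
            sendBytes(in);
        } finally {
            in.close();                  // mirrors the fix in this patch: always release the buffer
        }
    }

    private static void sendBytes(InputStream in) throws IOException {
        // stand-in for the actual transfer call
    }
}

Sizing the buffer one byte past the part keeps reset() within the buffered window for the whole part, which is why only the buffered data, not the whole file, needs to be retried.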
- new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)), - inputStreamContainer.getContentLength(), - streamReadExecutor - ) + AsyncRequestBody.fromInputStream(inputStream, inputStreamContainer.getContentLength(), streamReadExecutor) ).handle((resp, throwable) -> { + try { + inputStream.close(); + } catch (IOException e) { + log.error( + () -> new ParameterizedMessage("Failed to close stream while uploading single file {}.", uploadRequest.getKey()), + e + ); + } if (throwable != null) { Throwable unwrappedThrowable = ExceptionsHelper.unwrap(throwable, S3Exception.class); if (unwrappedThrowable != null) { diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java index 607453986ab16..97a746cdeed93 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java @@ -37,9 +37,14 @@ import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicReference; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -71,17 +76,16 @@ public void testOneChunkUpload() { putObjectResponseCompletableFuture ); + AtomicReference streamRef = new AtomicReference<>(); CompletableFuture resultFuture = asyncTransferManager.uploadObject( s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(1), WritePriority.HIGH, uploadSuccess -> { // do nothing }, false, null), - new StreamContext( - (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position), - ByteSizeUnit.MB.toBytes(1), - ByteSizeUnit.MB.toBytes(1), - 1 - ), + new StreamContext((partIdx, partSize, position) -> { + streamRef.set(new ZeroInputStream(partSize)); + return new InputStreamContainer(streamRef.get(), partSize, position); + }, ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 1), new StatsMetricPublisher() ); @@ -92,6 +96,14 @@ public void testOneChunkUpload() { } verify(s3AsyncClient, times(1)).putObject(any(PutObjectRequest.class), any(AsyncRequestBody.class)); + + boolean closeError = false; + try { + streamRef.get().available(); + } catch (IOException e) { + closeError = e.getMessage().equals("Stream closed"); + } + assertTrue("InputStream was still open after upload", closeError); } public void testOneChunkUploadCorruption() { @@ -162,17 +174,17 @@ public void testMultipartUpload() { abortMultipartUploadResponseCompletableFuture ); + List streams = new ArrayList<>(); CompletableFuture resultFuture = asyncTransferManager.uploadObject( s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(5), WritePriority.HIGH, uploadSuccess -> { // do nothing }, true, 3376132981L), - new StreamContext( - (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position), - ByteSizeUnit.MB.toBytes(1), - ByteSizeUnit.MB.toBytes(1), - 5 - ), + new StreamContext((partIdx, partSize, position) -> { + InputStream stream = new 
ZeroInputStream(partSize); + streams.add(stream); + return new InputStreamContainer(stream, partSize, position); + }, ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 5), new StatsMetricPublisher() ); @@ -182,6 +194,16 @@ public void testMultipartUpload() { fail("did not expect resultFuture to fail"); } + streams.forEach(stream -> { + boolean closeError = false; + try { + stream.available(); + } catch (IOException e) { + closeError = e.getMessage().equals("Stream closed"); + } + assertTrue("InputStream was still open after upload", closeError); + }); + verify(s3AsyncClient, times(1)).createMultipartUpload(any(CreateMultipartUploadRequest.class)); verify(s3AsyncClient, times(5)).uploadPart(any(UploadPartRequest.class), any(AsyncRequestBody.class)); verify(s3AsyncClient, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class)); From 1d23b88cbf2177861f3a98ab906accf39c32c766 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Thu, 19 Oct 2023 12:28:09 +0530 Subject: [PATCH 043/178] Muting s3 request stats test (#10736) Signed-off-by: Gaurav Bafna --- .../opensearch/repositories/s3/S3BlobStoreRepositoryTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index 1361f3165b653..4df30bfd2169e 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -165,6 +165,7 @@ protected Settings nodeSettings(int nodeOrdinal) { return builder.build(); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10735") @Override public void testRequestStats() throws Exception { final String repository = createRepository(randomName()); From 3899d117722e5517c3c02709cffc981bdf677fd5 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Thu, 19 Oct 2023 17:49:27 +0530 Subject: [PATCH 044/178] Changing version for repo stats blob post backport to 2.x (#10717) Signed-off-by: Gaurav Bafna --- .../opensearch/action/admin/cluster/node/stats/NodeStats.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index 0c8aa027e5f01..e9bfa358103c8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -211,7 +211,7 @@ public NodeStats(StreamInput in) throws IOException { } else { resourceUsageStats = null; } - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { repositoriesStats = in.readOptionalWriteable(RepositoriesStats::new); } else { repositoriesStats = null; @@ -465,7 +465,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(resourceUsageStats); } - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(repositoriesStats); } } From 
69f6f4e30909e215b4ca7fd55fd80cf8c4e8d3a4 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 19 Oct 2023 10:36:25 -0400 Subject: [PATCH 045/178] [AUTO] [main] Add bwc version 2.11.1. (#10648) * Add bwc version 2.11.1 Signed-off-by: GitHub * Update Version.java Signed-off-by: Andriy Redko --------- Signed-off-by: GitHub Signed-off-by: Andriy Redko Co-authored-by: opensearch-ci-bot Co-authored-by: Andriy Redko --- .ci/bwcVersions | 1 + libs/core/src/main/java/org/opensearch/Version.java | 1 + 2 files changed, 2 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index cfaadc5ed1e5e..144a8b71fca39 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -25,4 +25,5 @@ BWC_VERSION: - "2.10.0" - "2.10.1" - "2.11.0" + - "2.11.1" - "2.12.0" diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index eef4da719994c..8d9ee73a02c1d 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -96,6 +96,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_10_0 = new Version(2100099, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_10_1 = new Version(2100199, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_11_0 = new Version(2110099, org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_11_1 = new Version(2110199, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_8_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_8_0); public static final Version CURRENT = V_3_0_0; From da24ca756a3140f062e2c54d8fd0be88dc62e355 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 19 Oct 2023 15:25:46 -0400 Subject: [PATCH 046/178] Performance Improvement for Datetime formats (update version checks to 2.12.0) (#10754) Signed-off-by: Andriy Redko --- .../src/main/java/org/opensearch/search/DocValueFormat.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/DocValueFormat.java b/server/src/main/java/org/opensearch/search/DocValueFormat.java index 412191c57abd8..7be51643eeb7d 100644 --- a/server/src/main/java/org/opensearch/search/DocValueFormat.java +++ b/server/src/main/java/org/opensearch/search/DocValueFormat.java @@ -243,7 +243,7 @@ public DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resolu } public DateTime(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { this.formatter = DateFormatter.forPattern(in.readString(), in.readOptionalString()); } else { this.formatter = DateFormatter.forPattern(in.readString()); @@ -265,12 +265,12 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(Version.V_3_0_0) && formatter.equals(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER)) { + if (out.getVersion().before(Version.V_2_12_0) && formatter.equals(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER)) { out.writeString(DateFieldMapper.LEGACY_DEFAULT_DATE_TIME_FORMATTER.pattern()); // required for backwards compatibility } else { out.writeString(formatter.pattern()); } - if 
(out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalString(formatter.printPattern()); } out.writeString(timeZone.getId()); From e389a09640cf4d687ae5bbe59d36f5e15624e985 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Thu, 19 Oct 2023 13:31:01 -0700 Subject: [PATCH 047/178] Fix bug where retries within RemoteStoreRefreshListener cause infos/checkpoint mismatch (#10655) * Fix bug where retries within RemoteStoreRefreshListener cause mismatch between ReplicationCheckpoint and uploaded SegmentInfos. Retries within RemoteStoreRefreshListener run outside of the refresh thread. This means that concurrent refreshes may occur during syncSegments execution updating the on-reader SegmentInfos. A shard's latest ReplicationCheckpoint is computed and set in a refresh listener, but it is not guaranteed the listener has run before the retry fetches the infos or checkpoint independently. This fix ensures the listener recomputes the checkpoint while fetching the SegmentInfos. This change also ensures that we only recompute the checkpoint when necessary because it comes with an IO cost to compute StoreFileMetadata. Signed-off-by: Marc Handalian Update refresh listener to recompute checkpoint from latest infos snapshot. Signed-off-by: Marc Handalian Fix broken test case by comparing segments gen Signed-off-by: Marc Handalian spotless Signed-off-by: Marc Handalian Fix RemoteStoreRefreshListener tests Signed-off-by: Marc Handalian * add extra log Signed-off-by: Marc Handalian --------- Signed-off-by: Marc Handalian --- .../opensearch/index/shard/IndexShard.java | 67 +++++++++++-------- .../shard/RemoteStoreRefreshListener.java | 6 +- .../RemoteStoreRefreshListenerTests.java | 4 +- .../SegmentReplicationIndexShardTests.java | 27 ++++++++ 4 files changed, 70 insertions(+), 34 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 9489c7d7fc1dd..5ebfd3863a6cf 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -1608,8 +1608,11 @@ public GatedCloseable acquireSafeIndexCommit() throws EngineExcepti } /** - * Compute and return the latest ReplicationCheckpoint for a particular shard. - * @return EMPTY checkpoint before the engine is opened and null for non-segrep enabled indices + * return the most recently computed ReplicationCheckpoint for a particular shard. + * The checkpoint is updated inside a refresh listener and may lag behind the SegmentInfos on the reader. + * To guarantee the checkpoint is upto date with the latest on-reader infos, use `getLatestSegmentInfosAndCheckpoint` instead. + * + * @return {@link ReplicationCheckpoint} - The most recently computed ReplicationCheckpoint. */ public ReplicationCheckpoint getLatestReplicationCheckpoint() { return replicationTracker.getLatestReplicationCheckpoint(); @@ -1628,34 +1631,12 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() { public Tuple, ReplicationCheckpoint> getLatestSegmentInfosAndCheckpoint() { assert indexSettings.isSegRepEnabled(); - Tuple, ReplicationCheckpoint> nullSegmentInfosEmptyCheckpoint = new Tuple<>( - new GatedCloseable<>(null, () -> {}), - getLatestReplicationCheckpoint() - ); - - if (getEngineOrNull() == null) { - return nullSegmentInfosEmptyCheckpoint; - } // do not close the snapshot - caller will close it. 
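As context for the ownership contract in the comment above, a hedged caller-side sketch; only getLatestSegmentInfosAndCheckpoint and try-with-resources over GatedCloseable are taken from this patch, while the surrounding method is invented:

// Illustrative only: whoever receives the tuple owns and closes the snapshot.
void readLatestSegments(IndexShard indexShard) throws IOException {
    Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> tuple =
        indexShard.getLatestSegmentInfosAndCheckpoint();
    try (GatedCloseable<SegmentInfos> snapshot = tuple.v1()) {
        SegmentInfos infos = snapshot.get();           // null when the engine was closed
        ReplicationCheckpoint checkpoint = tuple.v2(); // computed from those same infos
        // ... act on the files referenced by infos, labelled with checkpoint ...
    }
}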
GatedCloseable snapshot = null; try { snapshot = getSegmentInfosSnapshot(); - if (snapshot.get() != null) { - SegmentInfos segmentInfos = snapshot.get(); - final Map metadataMap = store.getSegmentMetadataMap(segmentInfos); - return new Tuple<>( - snapshot, - new ReplicationCheckpoint( - this.shardId, - getOperationPrimaryTerm(), - segmentInfos.getGeneration(), - segmentInfos.getVersion(), - metadataMap.values().stream().mapToLong(StoreFileMetadata::length).sum(), - getEngine().config().getCodec().getName(), - metadataMap - ) - ); - } + final SegmentInfos segmentInfos = snapshot.get(); + return new Tuple<>(snapshot, computeReplicationCheckpoint(segmentInfos)); } catch (IOException | AlreadyClosedException e) { logger.error("Error Fetching SegmentInfos and latest checkpoint", e); if (snapshot != null) { @@ -1666,7 +1647,39 @@ public Tuple, ReplicationCheckpoint> getLatestSegme } } } - return nullSegmentInfosEmptyCheckpoint; + return new Tuple<>(new GatedCloseable<>(null, () -> {}), getLatestReplicationCheckpoint()); + } + + /** + * Compute the latest {@link ReplicationCheckpoint} from a SegmentInfos. + * This function fetches a metadata snapshot from the store that comes with an IO cost. + * We will reuse the existing stored checkpoint if it is at the same SI version. + * + * @param segmentInfos {@link SegmentInfos} infos to use to compute. + * @return {@link ReplicationCheckpoint} Checkpoint computed from the infos. + * @throws IOException When there is an error computing segment metadata from the store. + */ + ReplicationCheckpoint computeReplicationCheckpoint(SegmentInfos segmentInfos) throws IOException { + if (segmentInfos == null) { + return ReplicationCheckpoint.empty(shardId); + } + final ReplicationCheckpoint latestReplicationCheckpoint = getLatestReplicationCheckpoint(); + if (latestReplicationCheckpoint.getSegmentInfosVersion() == segmentInfos.getVersion() + && latestReplicationCheckpoint.getSegmentsGen() == segmentInfos.getGeneration()) { + return latestReplicationCheckpoint; + } + final Map metadataMap = store.getSegmentMetadataMap(segmentInfos); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + this.shardId, + getOperationPrimaryTerm(), + segmentInfos.getGeneration(), + segmentInfos.getVersion(), + metadataMap.values().stream().mapToLong(StoreFileMetadata::length).sum(), + getEngine().config().getCodec().getName(), + metadataMap + ); + logger.trace("Recomputed ReplicationCheckpoint for shard {}", checkpoint); + return checkpoint; } /** diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 698e61f6f7a09..c650edc31da8d 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -181,7 +181,6 @@ private boolean syncSegments() { // in the remote store. 
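Restating the reuse condition above as a hedged predicate (the helper name is invented; the getters are the ones this patch uses):

// Recompute only when the on-reader SegmentInfos has moved past the stored
// checkpoint; otherwise reuse it and skip the segment-metadata IO.
static boolean canReuseStoredCheckpoint(ReplicationCheckpoint stored, SegmentInfos infos) {
    return stored.getSegmentInfosVersion() == infos.getVersion()
        && stored.getSegmentsGen() == infos.getGeneration();
}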
return indexShard.state() != IndexShardState.STARTED || !(indexShard.getEngine() instanceof InternalEngine); } - ReplicationCheckpoint checkpoint = indexShard.getLatestReplicationCheckpoint(); beforeSegmentsSync(); long refreshTimeMs = segmentTracker.getLocalRefreshTimeMs(), refreshClockTimeMs = segmentTracker.getLocalRefreshClockTimeMs(); long refreshSeqNo = segmentTracker.getLocalRefreshSeqNo(); @@ -199,10 +198,7 @@ private boolean syncSegments() { try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); - assert segmentInfos.getGeneration() == checkpoint.getSegmentsGen() : "SegmentInfos generation: " - + segmentInfos.getGeneration() - + " does not match metadata generation: " - + checkpoint.getSegmentsGen(); + final ReplicationCheckpoint checkpoint = indexShard.computeReplicationCheckpoint(segmentInfos); // Capture replication checkpoint before uploading the segments as upload can take some time and checkpoint can // move. long lastRefreshedCheckpoint = ((InternalEngine) indexShard.getEngine()).lastRefreshedCheckpoint(); diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 5a13f57db2c87..51814283c5eb3 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -520,8 +520,8 @@ private Tuple mockIn if (counter.incrementAndGet() <= succeedOnAttempt) { throw new RuntimeException("Inducing failure in upload"); } - return indexShard.getLatestSegmentInfosAndCheckpoint(); - })).when(shard).getLatestSegmentInfosAndCheckpoint(); + return indexShard.getLatestReplicationCheckpoint(); + })).when(shard).computeReplicationCheckpoint(any()); doAnswer(invocation -> { if (Objects.nonNull(successLatch)) { diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 52f28aead533d..eab38bfe5c64d 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -925,6 +925,33 @@ public void testSnapshotWhileFailoverIncomplete() throws Exception { } } + public void testReuseReplicationCheckpointWhenLatestInfosIsUnChanged() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir())) { + final IndexShard primaryShard = shards.getPrimary(); + shards.startAll(); + shards.indexDocs(10); + shards.refresh("test"); + replicateSegments(primaryShard, shards.getReplicas()); + shards.assertAllEqual(10); + final ReplicationCheckpoint latestReplicationCheckpoint = primaryShard.getLatestReplicationCheckpoint(); + try (GatedCloseable segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { + assertEquals(latestReplicationCheckpoint, primaryShard.computeReplicationCheckpoint(segmentInfosSnapshot.get())); + } + final Tuple, ReplicationCheckpoint> latestSegmentInfosAndCheckpoint = primaryShard + .getLatestSegmentInfosAndCheckpoint(); + try (final GatedCloseable closeable = latestSegmentInfosAndCheckpoint.v1()) { + assertEquals(latestReplicationCheckpoint, primaryShard.computeReplicationCheckpoint(closeable.get())); + } + 
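To restate the race the commit message describes, a hedged before/after sketch built from the calls in this patch:

// Before the fix (sketch): two independent reads that a concurrent refresh
// could interleave, leaving the checkpoint older than the infos:
//   ReplicationCheckpoint checkpoint = indexShard.getLatestReplicationCheckpoint();
//   SegmentInfos segmentInfos = segmentInfosGatedCloseable.get();
// After the fix: the checkpoint is derived from the exact infos being uploaded.
SegmentInfos segmentInfos = segmentInfosGatedCloseable.get();
ReplicationCheckpoint checkpoint = indexShard.computeReplicationCheckpoint(segmentInfos);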
} + } + + public void testComputeReplicationCheckpointNullInfosReturnsEmptyCheckpoint() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir())) { + final IndexShard primaryShard = shards.getPrimary(); + assertEquals(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard.computeReplicationCheckpoint(null)); + } + } + private SnapshotShardsService getSnapshotShardsService(IndexShard replicaShard) { final TransportService transportService = mock(TransportService.class); when(transportService.getThreadPool()).thenReturn(threadPool); From 781968b2e53f3214b73d4d8e7c1baa572b334f27 Mon Sep 17 00:00:00 2001 From: Siddhant Deshmukh Date: Thu, 19 Oct 2023 14:39:29 -0700 Subject: [PATCH 048/178] Categorize search queries by type and log query shape (#10724) * Search Query Categorizor initial skeleton using QueryBuilderVisitor Signed-off-by: Siddhant Deshmukh * Integrate metrics framework, add counters and log query shape Signed-off-by: Siddhant Deshmukh * Update changelog Signed-off-by: Siddhant Deshmukh * Add level attribute to QueryBuilderVisitor and as a tag in Counters Signed-off-by: Siddhant Deshmukh * Log query shape as debug log Signed-off-by: Siddhant Deshmukh * Integrate metrics framework, refactor code and update tests Signed-off-by: Siddhant Deshmukh * Fix build Signed-off-by: Siddhant Deshmukh * Add javadocs Signed-off-by: Siddhant Deshmukh * Minor fix Signed-off-by: Siddhant Deshmukh * Spotless check changes Signed-off-by: Siddhant Deshmukh * Address comments, add agg and sort counters, add feature flag, refactoring Signed-off-by: Siddhant Deshmukh * Build fix Signed-off-by: Siddhant Deshmukh * spotless check Signed-off-by: Siddhant Deshmukh * Fix tests Signed-off-by: Siddhant Deshmukh * Dynamic feature flag with callback Signed-off-by: Siddhant Deshmukh * Minor fix Signed-off-by: Siddhant Deshmukh * Add initialization in callback Signed-off-by: Siddhant Deshmukh * Address comments Signed-off-by: Siddhant Deshmukh * Add exception handling Signed-off-by: Siddhant Deshmukh * Refactoring and renaming Signed-off-by: Siddhant Deshmukh * Minor fix Signed-off-by: Siddhant Deshmukh * Fix changelog and minor refactoring Signed-off-by: Siddhant Deshmukh * Address review comments Signed-off-by: Siddhant Deshmukh * Add unit tests Signed-off-by: Siddhant Deshmukh * Address review comments and add complex query unit test Signed-off-by: Siddhant Deshmukh * Add sort order as a tag to sort counter Signed-off-by: Siddhant Deshmukh * Address review comments Signed-off-by: Siddhant Deshmukh * Address final comments Signed-off-by: Siddhant Deshmukh * Build fix Signed-off-by: Siddhant Deshmukh * Fix build tests failure Signed-off-by: Siddhant Deshmukh * Minor fix Signed-off-by: Siddhant Deshmukh * Minor fix Signed-off-by: Siddhant Deshmukh * Empty commit Signed-off-by: Siddhant Deshmukh * Remove extra newline Signed-off-by: Michael Froh * Empty commit Signed-off-by: Siddhant Deshmukh --------- Signed-off-by: Siddhant Deshmukh Signed-off-by: Michael Froh Co-authored-by: Michael Froh --- CHANGELOG.md | 1 + .../action/search/SearchQueryCategorizer.java | 81 +++++++ .../SearchQueryCategorizingVisitor.java | 73 ++++++ .../action/search/SearchQueryCounters.java | 117 +++++++++ .../action/search/TransportSearchAction.java | 36 ++- .../common/settings/ClusterSettings.java | 1 + .../index/query/QueryShapeVisitor.java | 86 +++++++ .../search/SearchQueryCategorizerTests.java | 228 ++++++++++++++++++ 
.../index/query/QueryShapeVisitorTests.java | 31 +++ .../snapshots/SnapshotResiliencyTests.java | 4 +- 10 files changed, 656 insertions(+), 2 deletions(-) create mode 100644 server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java create mode 100644 server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java create mode 100644 server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java create mode 100644 server/src/main/java/org/opensearch/index/query/QueryShapeVisitor.java create mode 100644 server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java create mode 100644 server/src/test/java/org/opensearch/index/query/QueryShapeVisitorTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 0ad18b94f31b7..552c277789dd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -91,6 +91,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Added - Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) - [Remote Store] Add repository stats for remote store([#10567](https://github.com/opensearch-project/OpenSearch/pull/10567)) +- Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) ### Dependencies - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java new file mode 100644 index 0000000000000..9cbe2d2ffcb7d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; +import org.opensearch.index.query.QueryShapeVisitor; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.sort.SortBuilder; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.util.List; +import java.util.ListIterator; + +/** + * Class to categorize the search queries based on the type and increment the relevant counters. + * Class also logs the query shape. 
+ */ +final class SearchQueryCategorizer { + + private static final Logger log = LogManager.getLogger(SearchQueryCategorizer.class); + + final SearchQueryCounters searchQueryCounters; + + public SearchQueryCategorizer(MetricsRegistry metricsRegistry) { + searchQueryCounters = new SearchQueryCounters(metricsRegistry); + } + + public void categorize(SearchSourceBuilder source) { + QueryBuilder topLevelQueryBuilder = source.query(); + + logQueryShape(topLevelQueryBuilder); + incrementQueryTypeCounters(topLevelQueryBuilder); + incrementQueryAggregationCounters(source.aggregations()); + incrementQuerySortCounters(source.sorts()); + } + + private void incrementQuerySortCounters(List> sorts) { + if (sorts != null && sorts.size() > 0) { + for (ListIterator> it = sorts.listIterator(); it.hasNext();) { + SortBuilder sortBuilder = it.next(); + String sortOrder = sortBuilder.order().toString(); + searchQueryCounters.sortCounter.add(1, Tags.create().addTag("sort_order", sortOrder)); + } + } + } + + private void incrementQueryAggregationCounters(AggregatorFactories.Builder aggregations) { + if (aggregations != null) { + searchQueryCounters.aggCounter.add(1); + } + } + + private void incrementQueryTypeCounters(QueryBuilder topLevelQueryBuilder) { + if (topLevelQueryBuilder == null) { + return; + } + QueryBuilderVisitor searchQueryVisitor = new SearchQueryCategorizingVisitor(searchQueryCounters); + topLevelQueryBuilder.visit(searchQueryVisitor); + } + + private void logQueryShape(QueryBuilder topLevelQueryBuilder) { + if (topLevelQueryBuilder == null) { + return; + } + QueryShapeVisitor shapeVisitor = new QueryShapeVisitor(); + topLevelQueryBuilder.visit(shapeVisitor); + log.debug("Query shape : {}", shapeVisitor.prettyPrintTree(" ")); + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java new file mode 100644 index 0000000000000..98f0169e69a5c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.lucene.search.BooleanClause; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.MatchPhraseQueryBuilder; +import org.opensearch.index.query.MatchQueryBuilder; +import org.opensearch.index.query.MultiMatchQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; +import org.opensearch.index.query.QueryStringQueryBuilder; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.RegexpQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.WildcardQueryBuilder; +import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * Class to visit the querybuilder tree and also track the level information. + * Increments the counters related to Search Query type. 
+ */ +final class SearchQueryCategorizingVisitor implements QueryBuilderVisitor { + private static final String LEVEL_TAG = "level"; + private final int level; + private final SearchQueryCounters searchQueryCounters; + + public SearchQueryCategorizingVisitor(SearchQueryCounters searchQueryCounters) { + this(searchQueryCounters, 0); + } + + private SearchQueryCategorizingVisitor(SearchQueryCounters counters, int level) { + this.searchQueryCounters = counters; + this.level = level; + } + + public void accept(QueryBuilder qb) { + if (qb instanceof BoolQueryBuilder) { + searchQueryCounters.boolCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof FunctionScoreQueryBuilder) { + searchQueryCounters.functionScoreCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof MatchQueryBuilder) { + searchQueryCounters.matchCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof MatchPhraseQueryBuilder) { + searchQueryCounters.matchPhrasePrefixCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof MultiMatchQueryBuilder) { + searchQueryCounters.multiMatchCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof QueryStringQueryBuilder) { + searchQueryCounters.queryStringQueryCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof RangeQueryBuilder) { + searchQueryCounters.rangeCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof RegexpQueryBuilder) { + searchQueryCounters.regexCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof TermQueryBuilder) { + searchQueryCounters.termCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof WildcardQueryBuilder) { + searchQueryCounters.wildcardCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else { + searchQueryCounters.otherQueryCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } + } + + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + return new SearchQueryCategorizingVisitor(searchQueryCounters, level + 1); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java new file mode 100644 index 0000000000000..7e0259af07701 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java @@ -0,0 +1,117 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.MetricsRegistry; + +/** + * Class contains all the Counters related to search query types. 
+ */ +final class SearchQueryCounters { + private static final String UNIT = "1"; + private final MetricsRegistry metricsRegistry; + + // Counters related to Query types + public final Counter aggCounter; + public final Counter boolCounter; + public final Counter functionScoreCounter; + public final Counter matchCounter; + public final Counter matchPhrasePrefixCounter; + public final Counter multiMatchCounter; + public final Counter otherQueryCounter; + public final Counter queryStringQueryCounter; + public final Counter rangeCounter; + public final Counter regexCounter; + + public final Counter sortCounter; + public final Counter skippedCounter; + public final Counter termCounter; + public final Counter totalCounter; + public final Counter wildcardCounter; + + public SearchQueryCounters(MetricsRegistry metricsRegistry) { + this.metricsRegistry = metricsRegistry; + this.aggCounter = metricsRegistry.createCounter( + "search.query.type.agg.count", + "Counter for the number of top level agg search queries", + UNIT + ); + this.boolCounter = metricsRegistry.createCounter( + "search.query.type.bool.count", + "Counter for the number of top level and nested bool search queries", + UNIT + ); + this.functionScoreCounter = metricsRegistry.createCounter( + "search.query.type.functionscore.count", + "Counter for the number of top level and nested function score search queries", + UNIT + ); + this.matchCounter = metricsRegistry.createCounter( + "search.query.type.match.count", + "Counter for the number of top level and nested match search queries", + UNIT + ); + this.matchPhrasePrefixCounter = metricsRegistry.createCounter( + "search.query.type.matchphrase.count", + "Counter for the number of top level and nested match phrase prefix search queries", + UNIT + ); + this.multiMatchCounter = metricsRegistry.createCounter( + "search.query.type.multimatch.count", + "Counter for the number of top level and nested multi match search queries", + UNIT + ); + this.otherQueryCounter = metricsRegistry.createCounter( + "search.query.type.other.count", + "Counter for the number of top level and nested search queries that do not match any other categories", + UNIT + ); + this.queryStringQueryCounter = metricsRegistry.createCounter( + "search.query.type.querystringquery.count", + "Counter for the number of top level and nested queryStringQuery search queries", + UNIT + ); + this.rangeCounter = metricsRegistry.createCounter( + "search.query.type.range.count", + "Counter for the number of top level and nested range search queries", + UNIT + ); + this.regexCounter = metricsRegistry.createCounter( + "search.query.type.regex.count", + "Counter for the number of top level and nested regex search queries", + UNIT + ); + this.skippedCounter = metricsRegistry.createCounter( + "search.query.type.skipped.count", + "Counter for the number queries skipped due to error", + UNIT + ); + this.sortCounter = metricsRegistry.createCounter( + "search.query.type.sort.count", + "Counter for the number of top level sort search queries", + UNIT + ); + this.termCounter = metricsRegistry.createCounter( + "search.query.type.term.count", + "Counter for the number of top level and nested term search queries", + UNIT + ); + this.totalCounter = metricsRegistry.createCounter( + "search.query.type.total.count", + "Counter for the number of top level and nested search queries", + UNIT + ); + this.wildcardCounter = metricsRegistry.createCounter( + "search.query.type.wildcard.count", + "Counter for the number of top level and nested wildcard search 
queries", + UNIT + ); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 284f71bd9da62..a6fb8453af4ff 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -88,6 +88,7 @@ import org.opensearch.search.profile.SearchProfileShardResults; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.metrics.MetricsRegistry; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.RemoteClusterAware; import org.opensearch.transport.RemoteClusterService; @@ -137,6 +138,13 @@ public class TransportSearchAction extends HandledTransportAction SEARCH_QUERY_METRICS_ENABLED_SETTING = Setting.boolSetting( + "search.query.metrics.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + // cluster level setting for timeout based search cancellation. If search request level parameter is present then that will take // precedence over the cluster setting value public static final String SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING_KEY = "search.cancel_after_time_interval"; @@ -177,8 +185,14 @@ public class TransportSearchAction extends HandledTransportAction) SearchRequest::new); this.client = client; @@ -211,6 +226,17 @@ public TransportSearchAction( this.isRequestStatsEnabled = clusterService.getClusterSettings().get(SEARCH_REQUEST_STATS_ENABLED); clusterService.getClusterSettings().addSettingsUpdateConsumer(SEARCH_REQUEST_STATS_ENABLED, this::setIsRequestStatsEnabled); this.searchRequestStats = searchRequestStats; + this.metricsRegistry = metricsRegistry; + this.searchQueryMetricsEnabled = clusterService.getClusterSettings().get(SEARCH_QUERY_METRICS_ENABLED_SETTING); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(SEARCH_QUERY_METRICS_ENABLED_SETTING, this::setSearchQueryMetricsEnabled); + } + + private void setSearchQueryMetricsEnabled(boolean searchQueryMetricsEnabled) { + this.searchQueryMetricsEnabled = searchQueryMetricsEnabled; + if ((this.searchQueryMetricsEnabled == true) && this.searchQueryCategorizer == null) { + this.searchQueryCategorizer = new SearchQueryCategorizer(metricsRegistry); + } } private void setIsRequestStatsEnabled(boolean isRequestStatsEnabled) { @@ -489,6 +515,14 @@ private void executeRequest( return; } + if (searchQueryMetricsEnabled) { + try { + searchQueryCategorizer.categorize(searchRequest.source()); + } catch (Exception e) { + logger.error("Error while trying to categorize the query.", e); + } + } + ActionListener rewriteListener = ActionListener.wrap(source -> { if (source != searchRequest.source()) { // only set it if it changed - we don't allow null values to be set but it might be already null. 
this way we catch diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 90f91dcb7c553..76883c200542e 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -377,6 +377,7 @@ public void apply(Settings value, Settings current, Settings previous) { TransportSearchAction.SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING, TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED, TransportSearchAction.SEARCH_PHASE_TOOK_ENABLED, + TransportSearchAction.SEARCH_QUERY_METRICS_ENABLED_SETTING, RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE, SniffConnectionStrategy.REMOTE_CONNECTIONS_PER_CLUSTER, RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, diff --git a/server/src/main/java/org/opensearch/index/query/QueryShapeVisitor.java b/server/src/main/java/org/opensearch/index/query/QueryShapeVisitor.java new file mode 100644 index 0000000000000..3ba13bc7a2da4 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/QueryShapeVisitor.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.apache.lucene.search.BooleanClause; +import org.opensearch.common.SetOnce; + +import java.util.ArrayList; +import java.util.EnumMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +/** + * Class to traverse the QueryBuilder tree and capture the query shape + */ +public final class QueryShapeVisitor implements QueryBuilderVisitor { + private final SetOnce queryType = new SetOnce<>(); + private final Map> childVisitors = new EnumMap<>(BooleanClause.Occur.class); + + @Override + public void accept(QueryBuilder qb) { + queryType.set(qb.getName()); + } + + @Override + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + // Should get called once per Occur value + if (childVisitors.containsKey(occur)) { + throw new IllegalStateException("child visitor already called for " + occur); + } + final List childVisitorList = new ArrayList<>(); + QueryBuilderVisitor childVisitorWrapper = new QueryBuilderVisitor() { + QueryShapeVisitor currentChild; + + @Override + public void accept(QueryBuilder qb) { + currentChild = new QueryShapeVisitor(); + childVisitorList.add(currentChild); + currentChild.accept(qb); + } + + @Override + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + return currentChild.getChildVisitor(occur); + } + }; + childVisitors.put(occur, childVisitorList); + return childVisitorWrapper; + } + + String toJson() { + StringBuilder outputBuilder = new StringBuilder("{\"type\":\"").append(queryType.get()).append("\""); + for (Map.Entry> entry : childVisitors.entrySet()) { + outputBuilder.append(",\"").append(entry.getKey().name().toLowerCase(Locale.ROOT)).append("\"["); + boolean first = true; + for (QueryShapeVisitor child : entry.getValue()) { + if (!first) { + outputBuilder.append(","); + } + outputBuilder.append(child.toJson()); + first = false; + } + outputBuilder.append("]"); + } + outputBuilder.append("}"); + return outputBuilder.toString(); + } + + public String prettyPrintTree(String indent) { + StringBuilder outputBuilder = new 
StringBuilder(indent).append(queryType.get()).append("\n"); + for (Map.Entry> entry : childVisitors.entrySet()) { + outputBuilder.append(indent).append(" ").append(entry.getKey().name().toLowerCase(Locale.ROOT)).append(":\n"); + for (QueryShapeVisitor child : entry.getValue()) { + outputBuilder.append(child.prettyPrintTree(indent + " ")); + } + } + return outputBuilder.toString(); + } +} diff --git a/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java b/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java new file mode 100644 index 0000000000000..a2e301143d694 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java @@ -0,0 +1,228 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.BoostingQueryBuilder; +import org.opensearch.index.query.MatchNoneQueryBuilder; +import org.opensearch.index.query.MatchQueryBuilder; +import org.opensearch.index.query.MultiMatchQueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryStringQueryBuilder; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.RegexpQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.WildcardQueryBuilder; +import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; +import org.opensearch.search.aggregations.bucket.range.RangeAggregationBuilder; +import org.opensearch.search.aggregations.bucket.terms.MultiTermsAggregationBuilder; +import org.opensearch.search.aggregations.support.MultiTermsValuesSourceConfig; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.sort.ScoreSortBuilder; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.tags.Tags; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.Arrays; + +import org.mockito.Mockito; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.when; + +public final class SearchQueryCategorizerTests extends OpenSearchTestCase { + + private MetricsRegistry metricsRegistry; + + private SearchQueryCategorizer searchQueryCategorizer; + + @Before + public void setup() { + metricsRegistry = mock(MetricsRegistry.class); + when(metricsRegistry.createCounter(any(String.class), any(String.class), any(String.class))).thenAnswer( + invocation -> mock(Counter.class) + ); + searchQueryCategorizer = new SearchQueryCategorizer(metricsRegistry); + } + + public void testAggregationsQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.aggregation( + new MultiTermsAggregationBuilder("agg1").terms( + Arrays.asList( + new MultiTermsValuesSourceConfig.Builder().setFieldName("username").build(), + new MultiTermsValuesSourceConfig.Builder().setFieldName("rating").build() + ) + ) + ); + sourceBuilder.size(0); + + searchQueryCategorizer.categorize(sourceBuilder); + 
+ Mockito.verify(searchQueryCategorizer.searchQueryCounters.aggCounter).add(eq(1.0d)); + } + + public void testBoolQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(new BoolQueryBuilder().must(new MatchQueryBuilder("searchText", "fox"))); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.boolCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testFunctionScoreQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.functionScoreCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testMatchQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "php")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testMatchPhraseQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(QueryBuilders.matchPhraseQuery("tags", "php")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchPhrasePrefixCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testMultiMatchQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(new MultiMatchQueryBuilder("foo bar", "myField")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.multiMatchCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testOtherQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + BoostingQueryBuilder queryBuilder = new BoostingQueryBuilder( + new TermQueryBuilder("unmapped_field", "foo"), + new MatchNoneQueryBuilder() + ); + sourceBuilder.query(queryBuilder); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.otherQueryCounter, times(2)).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testQueryStringQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + QueryStringQueryBuilder queryBuilder = new QueryStringQueryBuilder("foo:*"); + sourceBuilder.query(queryBuilder); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.queryStringQueryCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testRangeQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + RangeQueryBuilder rangeQuery = new RangeQueryBuilder("date"); + rangeQuery.gte("1970-01-01"); + rangeQuery.lt("1982-01-01"); + sourceBuilder.query(rangeQuery); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.rangeCounter).add(eq(1.0d), any(Tags.class)); + } + + 
public void testRegexQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.query(new RegexpQueryBuilder("field", "text")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.regexCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testSortQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "ruby")); + sourceBuilder.sort("creationDate", SortOrder.DESC); + sourceBuilder.sort(new ScoreSortBuilder()); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.sortCounter, times(2)).add(eq(1.0d), any(Tags.class)); + } + + public void testTermQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(QueryBuilders.termQuery("field", "value2")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testWildcardQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(new WildcardQueryBuilder("field", "text")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.wildcardCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testComplexQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + + TermQueryBuilder termQueryBuilder = QueryBuilders.termQuery("field", "value2"); + MatchQueryBuilder matchQueryBuilder = QueryBuilders.matchQuery("tags", "php"); + RegexpQueryBuilder regexpQueryBuilder = new RegexpQueryBuilder("field", "text"); + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder().must(termQueryBuilder) + .filter(matchQueryBuilder) + .should(regexpQueryBuilder); + sourceBuilder.query(boolQueryBuilder); + sourceBuilder.aggregation(new RangeAggregationBuilder("agg1").field("num")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.regexCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.boolCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.aggCounter).add(eq(1.0d)); + } +} diff --git a/server/src/test/java/org/opensearch/index/query/QueryShapeVisitorTests.java b/server/src/test/java/org/opensearch/index/query/QueryShapeVisitorTests.java new file mode 100644 index 0000000000000..18b814aec61c2 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/query/QueryShapeVisitorTests.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.query; + +import org.opensearch.test.OpenSearchTestCase; + +import static org.junit.Assert.assertEquals; + +public final class QueryShapeVisitorTests extends OpenSearchTestCase { + public void testQueryShapeVisitor() { + QueryBuilder builder = new BoolQueryBuilder().must(new TermQueryBuilder("foo", "bar")) + .filter(new ConstantScoreQueryBuilder(new RangeQueryBuilder("timestamp").from("12345677").to("2345678"))) + .should( + new BoolQueryBuilder().must(new MatchQueryBuilder("text", "this is some text")) + .mustNot(new RegexpQueryBuilder("color", "red.*")) + ) + .must(new TermsQueryBuilder("genre", "action", "drama", "romance")); + QueryShapeVisitor shapeVisitor = new QueryShapeVisitor(); + builder.visit(shapeVisitor); + assertEquals( + "{\"type\":\"bool\",\"must\"[{\"type\":\"term\"},{\"type\":\"terms\"}],\"filter\"[{\"type\":\"constant_score\",\"filter\"[{\"type\":\"range\"}]}],\"should\"[{\"type\":\"bool\",\"must\"[{\"type\":\"match\"}],\"must_not\"[{\"type\":\"regexp\"}]}]}", + shapeVisitor.toJson() + ); + } +} diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 97c5d23831965..2f9f38d18a064 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -222,6 +222,7 @@ import org.opensearch.search.query.QueryPhase; import org.opensearch.snapshots.mockstore.MockEventuallyConsistentRepository; import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; @@ -2302,7 +2303,8 @@ public void onFailure(final Exception e) { List.of(), client ), - null + null, + NoopMetricsRegistry.INSTANCE ) ); actions.put( From 200ad5d28a577877be530ecab507601898025c5c Mon Sep 17 00:00:00 2001 From: Ticheng Lin <51488860+ticheng-aws@users.noreply.github.com> Date: Thu, 19 Oct 2023 19:58:13 -0700 Subject: [PATCH 049/178] Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight (#10352) * Fix timer race condition in profile rewrite and create weight for concurrent segment search (#10352) Signed-off-by: Ticheng Lin * Refactor and work on the PR comments (#10352) Signed-off-by: Ticheng Lin --------- Signed-off-by: Ticheng Lin --- CHANGELOG.md | 1 + .../search/profile/query/QueryProfilerIT.java | 157 +++++++++++++++++- .../opensearch/search/profile/Profilers.java | 7 +- .../org/opensearch/search/profile/Timer.java | 12 ++ .../query/AbstractQueryProfileTree.java | 5 +- .../ConcurrentQueryProfileBreakdown.java | 27 ++- .../query/ConcurrentQueryProfiler.java | 134 +++++++++++++++ .../search/profile/query/QueryProfiler.java | 14 +- .../ConcurrentQueryProfileBreakdownTests.java | 52 ++++++ .../query/ConcurrentQueryProfilerTests.java | 36 ++++ .../profile/query/QueryProfilerTests.java | 16 +- 11 files changed, 438 insertions(+), 23 deletions(-) create mode 100644 server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java create mode 100644 server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 552c277789dd7..5c52c43a35b8d 100644 --- 
a/CHANGELOG.md +++ b/CHANGELOG.md @@ -92,6 +92,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) - [Remote Store] Add repository stats for remote store([#10567](https://github.com/opensearch-project/OpenSearch/pull/10567)) - Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) +- Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) ### Dependencies - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java index 5f794d2abf878..ef73438114079 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.profile.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.English; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.MultiSearchResponse; @@ -40,20 +42,23 @@ import org.opensearch.action.search.SearchType; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; import org.opensearch.search.profile.ProfileResult; import org.opensearch.search.profile.ProfileShardResult; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; @@ -61,8 +66,32 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +public class QueryProfilerIT extends ParameterizedOpenSearchIntegTestCase { + private final boolean concurrentSearchEnabled; + private static final String MAX_PREFIX = "max_"; + private static final String MIN_PREFIX = "min_"; + private static final String AVG_PREFIX = "avg_"; + private static final String TIMING_TYPE_COUNT_SUFFIX = "_count"; + + public QueryProfilerIT(Settings settings, boolean concurrentSearchEnabled) { + super(settings); + this.concurrentSearchEnabled = concurrentSearchEnabled; + } -public class QueryProfilerIT extends OpenSearchIntegTestCase { + @ParametersFactory + public static Collection 
parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build(), false }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(), true } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } /** * This test simply checks to make sure nothing crashes. Test indexes 100-150 documents, @@ -229,6 +258,7 @@ public void testSimpleMatch() throws Exception { assertEquals(result.getLuceneDescription(), "field1:one"); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -271,6 +301,7 @@ public void testBool() throws Exception { assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); assertEquals(result.getProfiledChildren().size(), 2); + assertQueryProfileResult(result); // Check the children List children = result.getProfiledChildren(); @@ -282,12 +313,14 @@ public void testBool() throws Exception { assertThat(childProfile.getTime(), greaterThan(0L)); assertNotNull(childProfile.getTimeBreakdown()); assertEquals(childProfile.getProfiledChildren().size(), 0); + assertQueryProfileResult(childProfile); childProfile = children.get(1); assertEquals(childProfile.getQueryName(), "TermQuery"); assertEquals(childProfile.getLuceneDescription(), "field1:two"); assertThat(childProfile.getTime(), greaterThan(0L)); assertNotNull(childProfile.getTimeBreakdown()); + assertQueryProfileResult(childProfile); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -330,6 +363,7 @@ public void testEmptyBool() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -375,6 +409,7 @@ public void testCollapsingBool() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -415,6 +450,90 @@ public void testBoosting() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + public void testSearchLeafForItsLeavesAndRewriteQuery() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = 122; + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + } + + List terms = Arrays.asList("zero", "zero", "one"); + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.boostingQuery( + QueryBuilders.idsQuery().addIds(String.valueOf(randomInt()), String.valueOf(randomInt())), + QueryBuilders.termsQuery("field1", terms) + 
).boost(randomFloat()).negativeBoost(randomFloat()); + logger.info("Query: {}", q); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setTrackTotalHits(true) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .get(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + + for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { + assertThat(shardResult.getValue().getNetworkTime().getInboundNetworkTime(), greaterThanOrEqualTo(0L)); + assertThat(shardResult.getValue().getNetworkTime().getOutboundNetworkTime(), greaterThanOrEqualTo(0L)); + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + List results = searchProfiles.getQueryResults(); + for (ProfileResult result : results) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + Map breakdown = result.getTimeBreakdown(); + Long maxSliceTime = result.getMaxSliceTime(); + Long minSliceTime = result.getMinSliceTime(); + Long avgSliceTime = result.getAvgSliceTime(); + if (concurrentSearchEnabled && results.get(0).equals(result)) { + assertNotNull(maxSliceTime); + assertNotNull(minSliceTime); + assertNotNull(avgSliceTime); + assertThat(breakdown.size(), equalTo(66)); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + if (queryTimingType != QueryTimingType.CREATE_WEIGHT) { + String maxTimingType = MAX_PREFIX + queryTimingType; + String minTimingType = MIN_PREFIX + queryTimingType; + String avgTimingType = AVG_PREFIX + queryTimingType; + assertNotNull(breakdown.get(maxTimingType)); + assertNotNull(breakdown.get(minTimingType)); + assertNotNull(breakdown.get(avgTimingType)); + assertNotNull(breakdown.get(maxTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(minTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(avgTimingType + TIMING_TYPE_COUNT_SUFFIX)); + } + } + } else if (concurrentSearchEnabled) { + assertThat(maxSliceTime, equalTo(0L)); + assertThat(minSliceTime, equalTo(0L)); + assertThat(avgSliceTime, equalTo(0L)); + assertThat(breakdown.size(), equalTo(27)); + } else { + assertThat(maxSliceTime, is(nullValue())); + assertThat(minSliceTime, is(nullValue())); + assertThat(avgSliceTime, is(nullValue())); + assertThat(breakdown.size(), equalTo(27)); + } } CollectorResult result = searchProfiles.getCollectorResult(); @@ -455,6 +574,7 @@ public void testDisMaxRange() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -494,6 +614,7 @@ public void testRange() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -547,6 +668,7 @@ public void testPhrase() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -579,4 +701,35 @@ public void testNoProfile() throws Exception { 
assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0)); } + private void assertQueryProfileResult(ProfileResult result) { + Map breakdown = result.getTimeBreakdown(); + Long maxSliceTime = result.getMaxSliceTime(); + Long minSliceTime = result.getMinSliceTime(); + Long avgSliceTime = result.getAvgSliceTime(); + if (concurrentSearchEnabled) { + assertNotNull(maxSliceTime); + assertNotNull(minSliceTime); + assertNotNull(avgSliceTime); + assertThat(breakdown.size(), equalTo(66)); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + if (queryTimingType != QueryTimingType.CREATE_WEIGHT) { + String maxTimingType = MAX_PREFIX + queryTimingType; + String minTimingType = MIN_PREFIX + queryTimingType; + String avgTimingType = AVG_PREFIX + queryTimingType; + assertNotNull(breakdown.get(maxTimingType)); + assertNotNull(breakdown.get(minTimingType)); + assertNotNull(breakdown.get(avgTimingType)); + assertNotNull(breakdown.get(maxTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(minTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(avgTimingType + TIMING_TYPE_COUNT_SUFFIX)); + } + } + } else { + assertThat(maxSliceTime, is(nullValue())); + assertThat(minSliceTime, is(nullValue())); + assertThat(avgSliceTime, is(nullValue())); + assertThat(breakdown.size(), equalTo(27)); + } + } + } diff --git a/server/src/main/java/org/opensearch/search/profile/Profilers.java b/server/src/main/java/org/opensearch/search/profile/Profilers.java index 8e87c7ff4acd4..68cf05c988b5b 100644 --- a/server/src/main/java/org/opensearch/search/profile/Profilers.java +++ b/server/src/main/java/org/opensearch/search/profile/Profilers.java @@ -35,6 +35,9 @@ import org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.profile.aggregation.AggregationProfiler; import org.opensearch.search.profile.aggregation.ConcurrentAggregationProfiler; +import org.opensearch.search.profile.query.ConcurrentQueryProfileTree; +import org.opensearch.search.profile.query.ConcurrentQueryProfiler; +import org.opensearch.search.profile.query.InternalQueryProfileTree; import org.opensearch.search.profile.query.QueryProfiler; import java.util.ArrayList; @@ -64,7 +67,9 @@ public Profilers(ContextIndexSearcher searcher, boolean isConcurrentSegmentSearc /** Switch to a new profile. */ public QueryProfiler addQueryProfiler() { - QueryProfiler profiler = new QueryProfiler(isConcurrentSegmentSearchEnabled); + QueryProfiler profiler = isConcurrentSegmentSearchEnabled + ? 
new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); queryProfilers.add(profiler); return profiler; diff --git a/server/src/main/java/org/opensearch/search/profile/Timer.java b/server/src/main/java/org/opensearch/search/profile/Timer.java index 172762cabeb6a..864c689cf7fa0 100644 --- a/server/src/main/java/org/opensearch/search/profile/Timer.java +++ b/server/src/main/java/org/opensearch/search/profile/Timer.java @@ -53,6 +53,18 @@ public class Timer { private boolean doTiming; private long timing, count, lastCount, start, earliestTimerStartTime; + public Timer() { + this(0, 0, 0, 0, 0); + } + + public Timer(long timing, long count, long lastCount, long start, long earliestTimerStartTime) { + this.timing = timing; + this.count = count; + this.lastCount = lastCount; + this.start = start; + this.earliestTimerStartTime = earliestTimerStartTime; + } + /** pkg-private for testing */ long nanoTime() { return System.nanoTime(); diff --git a/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java b/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java index 8e825def13f5d..2f5d632ee2d87 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java +++ b/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java @@ -54,14 +54,11 @@ public void startRewriteTime() { * startRewriteTime() must be called for a particular context prior to calling * stopAndAddRewriteTime(), otherwise the elapsed time will be negative and * nonsensical - * - * @return The elapsed time */ - public long stopAndAddRewriteTime() { + public void stopAndAddRewriteTime() { long time = Math.max(1, System.nanoTime() - rewriteScratch); rewriteTime += time; rewriteScratch = 0; - return time; } public long getRewriteTime() { diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java index e567fdd2d436c..59ef01f9f947a 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java @@ -70,7 +70,7 @@ public Map toBreakdownMap() { ); final long createWeightTime = topLevelBreakdownMapWithWeightTime.get(QueryTimingType.CREATE_WEIGHT.toString()); - if (sliceCollectorsToLeaves.isEmpty() || contexts.isEmpty()) { + if (contexts.isEmpty()) { // If there are no leaf contexts, then return the default concurrent query level breakdown, which will include the // create_weight time/count queryNodeTime = createWeightTime; @@ -78,6 +78,21 @@ public Map toBreakdownMap() { minSliceNodeTime = 0L; avgSliceNodeTime = 0L; return buildDefaultQueryBreakdownMap(createWeightTime); + } else if (sliceCollectorsToLeaves.isEmpty()) { + // This will happen when each slice executes search leaf for its leaves and query is rewritten for the leaf being searched. It + // creates a new weight and breakdown map for each rewritten query. This new breakdown map captures the timing information for + // the new rewritten query. The sliceCollectorsToLeaves is empty because this breakdown for rewritten query gets created later + // in search leaf path which doesn't have collector. 
Also, this is not needed since this breakdown is per leaf and there is no
+            // concurrency involved. An empty sliceCollectorsToLeaves could also happen in the case of early termination.
+            AbstractProfileBreakdown<QueryTimingType> breakdown = contexts.values().iterator().next();
+            queryNodeTime = breakdown.toNodeTime() + createWeightTime;
+            maxSliceNodeTime = 0L;
+            minSliceNodeTime = 0L;
+            avgSliceNodeTime = 0L;
+            Map<String, Long> queryBreakdownMap = new HashMap<>(breakdown.toBreakdownMap());
+            queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT.toString(), createWeightTime);
+            queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX, 1L);
+            return queryBreakdownMap;
         }
 
         // first create the slice level breakdowns
@@ -191,10 +206,12 @@ Map<Collector, Map<String, Long>> buildSliceLevelBreakdown() {
             }
             // compute sliceMaxEndTime as max of sliceEndTime across all timing types
             sliceMaxEndTime = Math.max(sliceMaxEndTime, currentSliceBreakdown.getOrDefault(timingTypeSliceEndTimeKey, Long.MIN_VALUE));
-            sliceMinStartTime = Math.min(
-                sliceMinStartTime,
-                currentSliceBreakdown.getOrDefault(timingTypeSliceStartTimeKey, Long.MAX_VALUE)
-            );
+            long currentSliceStartTime = currentSliceBreakdown.getOrDefault(timingTypeSliceStartTimeKey, Long.MAX_VALUE);
+            if (currentSliceStartTime == 0L) {
+                // The timer for the current timing type never starts, so we continue here
+                continue;
+            }
+            sliceMinStartTime = Math.min(sliceMinStartTime, currentSliceStartTime);
             // compute total time for each timing type at slice level using sliceEndTime and sliceStartTime
             currentSliceBreakdown.put(
                 timingType.toString(),
diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java
new file mode 100644
index 0000000000000..42bf23bb13fbe
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java
@@ -0,0 +1,134 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.profile.query;
+
+import org.apache.lucene.search.Query;
+import org.opensearch.search.profile.ContextualProfileBreakdown;
+import org.opensearch.search.profile.ProfileResult;
+import org.opensearch.search.profile.Timer;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * This class acts as a thread-local storage for profiling a query with concurrent execution
+ *
+ * @opensearch.internal
+ */
+public final class ConcurrentQueryProfiler extends QueryProfiler {
+
+    private final Map<Long, ConcurrentQueryProfileTree> threadToProfileTree;
+    // The LinkedList does not need to be thread safe, as the map associates thread IDs with LinkedList, and only
+    // one thread will access the LinkedList at a time.
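+    // Rewrites can run concurrently on different threads, so each thread appends to its own
+    // list of rewrite timers; getRewriteTime() below merges the per-thread [start, end]
+    // intervals and sums only the merged lengths, so overlapping rewrite work across threads
+    // is not double counted.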
+    private final Map<Long, LinkedList<Timer>> threadToRewriteTimers;
+
+    public ConcurrentQueryProfiler(AbstractQueryProfileTree profileTree) {
+        super(profileTree);
+        long threadId = getCurrentThreadId();
+        // We utilize LinkedHashMap to preserve the insertion order of the profiled queries
+        threadToProfileTree = Collections.synchronizedMap(new LinkedHashMap<>());
+        threadToProfileTree.put(threadId, (ConcurrentQueryProfileTree) profileTree);
+        threadToRewriteTimers = new ConcurrentHashMap<>();
+        threadToRewriteTimers.put(threadId, new LinkedList<>());
+    }
+
+    @Override
+    public ContextualProfileBreakdown<QueryTimingType> getQueryBreakdown(Query query) {
+        ConcurrentQueryProfileTree profileTree = threadToProfileTree.computeIfAbsent(
+            getCurrentThreadId(),
+            k -> new ConcurrentQueryProfileTree()
+        );
+        return profileTree.getProfileBreakdown(query);
+    }
+
+    /**
+     * Removes the last (i.e. most recent) element on the ConcurrentQueryProfileTree stack.
+     */
+    @Override
+    public void pollLastElement() {
+        ConcurrentQueryProfileTree concurrentProfileTree = threadToProfileTree.get(getCurrentThreadId());
+        if (concurrentProfileTree != null) {
+            concurrentProfileTree.pollLast();
+        }
+    }
+
+    /**
+     * @return a hierarchical representation of the profiled tree
+     */
+    @Override
+    public List<ProfileResult> getTree() {
+        List<ProfileResult> profileResults = new ArrayList<>();
+        for (Map.Entry<Long, ConcurrentQueryProfileTree> profile : threadToProfileTree.entrySet()) {
+            profileResults.addAll(profile.getValue().getTree());
+        }
+        return profileResults;
+    }
+
+    /**
+     * Begin timing the rewrite phase of a request
+     */
+    @Override
+    public void startRewriteTime() {
+        Timer rewriteTimer = new Timer();
+        threadToRewriteTimers.computeIfAbsent(getCurrentThreadId(), k -> new LinkedList<>()).add(rewriteTimer);
+        rewriteTimer.start();
+    }
+
+    /**
+     * Stop recording the current rewrite timer
+     */
+    public void stopAndAddRewriteTime() {
+        Timer rewriteTimer = threadToRewriteTimers.get(getCurrentThreadId()).getLast();
+        rewriteTimer.stop();
+    }
+
+    /**
+     * @return total time taken to rewrite all queries in this concurrent query profiler
+     */
+    @Override
+    public long getRewriteTime() {
+        long totalRewriteTime = 0L;
+        List<Timer> rewriteTimers = new LinkedList<>();
+        threadToRewriteTimers.values().forEach(rewriteTimers::addAll);
+        LinkedList<long[]> mergedIntervals = mergeRewriteTimeIntervals(rewriteTimers);
+        for (long[] interval : mergedIntervals) {
+            totalRewriteTime += interval[1] - interval[0];
+        }
+        return totalRewriteTime;
+    }
+
+    // package private for unit testing
+    LinkedList<long[]> mergeRewriteTimeIntervals(List<Timer> timers) {
+        LinkedList<long[]> mergedIntervals = new LinkedList<>();
+        timers.sort(Comparator.comparingLong(Timer::getEarliestTimerStartTime));
+        for (Timer timer : timers) {
+            long startTime = timer.getEarliestTimerStartTime();
+            long endTime = startTime + timer.getApproximateTiming();
+            if (mergedIntervals.isEmpty() || mergedIntervals.getLast()[1] < startTime) {
+                long[] interval = new long[2];
+                interval[0] = startTime;
+                interval[1] = endTime;
+                mergedIntervals.add(interval);
+            } else {
+                mergedIntervals.getLast()[1] = Math.max(mergedIntervals.getLast()[1], endTime);
+            }
+        }
+        return mergedIntervals;
+    }
+
+    private long getCurrentThreadId() {
+        return Thread.currentThread().getId();
+    }
+}
diff --git a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java
index a80ce1c658081..332c4b3551450 100644
--- a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java
+++
b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java @@ -51,15 +51,15 @@ * * @opensearch.internal */ -public final class QueryProfiler extends AbstractProfiler, Query> { +public class QueryProfiler extends AbstractProfiler, Query> { /** * The root Collector used in the search */ private InternalProfileComponent collector; - public QueryProfiler(boolean concurrent) { - super(concurrent ? new ConcurrentQueryProfileTree() : new InternalQueryProfileTree()); + public QueryProfiler(AbstractQueryProfileTree profileTree) { + super(profileTree); } /** Set the collector that is associated with this profiler. */ @@ -81,14 +81,14 @@ public void startRewriteTime() { /** * Stop recording the current rewrite and add it's time to the total tally, returning the * cumulative time so far. - * - * @return cumulative rewrite time */ - public long stopAndAddRewriteTime() { - return ((AbstractQueryProfileTree) profileTree).stopAndAddRewriteTime(); + public void stopAndAddRewriteTime() { + ((AbstractQueryProfileTree) profileTree).stopAndAddRewriteTime(); } /** + * The rewriting process is complex and hard to display because queries can undergo significant changes. + * Instead of showing intermediate results, we display the cumulative time for the non-concurrent search case. * @return total time taken to rewrite all queries in this profile */ public long getRewriteTime() { diff --git a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java index f29ba3b0cea07..db14eb90ef839 100644 --- a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java +++ b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java @@ -333,6 +333,58 @@ public void testBreakDownMapWithMultipleSlicesAndOneSliceWithNoLeafContext() thr directory.close(); } + public void testOneLeafContextWithEmptySliceCollectorsToLeaves() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(1); + final Directory directory = directoryReader.directory(); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_1 + ); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); + final Map queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap(); + assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty()); + assertEquals(26, queryBreakDownMap.size()); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + String timingTypeKey = queryTimingType.toString(); + String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX; + + if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) { + final long createWeightTime = queryBreakDownMap.get(timingTypeKey); + assertEquals(createWeightTimer.getApproximateTiming(), createWeightTime); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for weight type stats + assertFalse( + 
queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + continue; + } + assertNotNull(queryBreakDownMap.get(timingTypeKey)); + assertNotNull(queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for current breakdown type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + } + assertEquals(0, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getAvgSliceNodeTime()); + directoryReader.close(); + directory.close(); + } + private Map getLeafBreakdownMap(long startTime, long timeTaken, long count) { Map leafBreakDownMap = new HashMap<>(); for (QueryTimingType timingType : QueryTimingType.values()) { diff --git a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java new file mode 100644 index 0000000000000..736bbcdd9e8dd --- /dev/null +++ b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.search.profile.query;
+
+import org.opensearch.search.profile.Timer;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.LinkedList;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ConcurrentQueryProfilerTests extends OpenSearchTestCase {
+
+    public void testMergeRewriteTimeIntervals() {
+        ConcurrentQueryProfiler profiler = new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree());
+        List<Timer> timers = new LinkedList<>();
+        timers.add(new Timer(217134L, 1L, 1L, 0L, 553074511206907L));
+        timers.add(new Timer(228954L, 1L, 1L, 0L, 553074509287335L));
+        timers.add(new Timer(228954L, 1L, 1L, 0L, 553074509287336L));
+        LinkedList<long[]> mergedIntervals = profiler.mergeRewriteTimeIntervals(timers);
+        assertThat(mergedIntervals.size(), equalTo(2));
+        long[] interval = mergedIntervals.get(0);
+        assertThat(interval[0], equalTo(553074509287335L));
+        assertThat(interval[1], equalTo(553074509516290L));
+        interval = mergedIntervals.get(1);
+        assertThat(interval[0], equalTo(553074511206907L));
+        assertThat(interval[1], equalTo(553074511424041L));
+    }
+}
diff --git a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java
index 64a440b85eb10..481a224f2ff0e 100644
--- a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java
+++ b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java
@@ -161,7 +161,9 @@ public void tearDown() throws Exception {
     }
 
     public void testBasic() throws IOException {
-        QueryProfiler profiler = new QueryProfiler(executor != null);
+        QueryProfiler profiler = executor != null
+            ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree())
+            : new QueryProfiler(new InternalQueryProfileTree());
         searcher.setProfiler(profiler);
         Query query = new TermQuery(new Term("foo", "bar"));
         searcher.search(query, 1);
@@ -228,7 +230,9 @@ public void testBasic() throws IOException {
     }
 
     public void testNoScoring() throws IOException {
-        QueryProfiler profiler = new QueryProfiler(executor != null);
+        QueryProfiler profiler = executor != null
+            ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree())
+            : new QueryProfiler(new InternalQueryProfileTree());
         searcher.setProfiler(profiler);
         Query query = new TermQuery(new Term("foo", "bar"));
         searcher.search(query, 1, Sort.INDEXORDER); // scores are not needed
@@ -295,7 +299,9 @@ public void testNoScoring() throws IOException {
     }
 
     public void testUseIndexStats() throws IOException {
-        QueryProfiler profiler = new QueryProfiler(executor != null);
+        QueryProfiler profiler = executor != null
+            ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree())
+            : new QueryProfiler(new InternalQueryProfileTree());
         searcher.setProfiler(profiler);
         Query query = new TermQuery(new Term("foo", "bar"));
         searcher.count(query); // will use index stats
@@ -309,7 +315,9 @@ public void testUseIndexStats() throws IOException {
     }
 
     public void testApproximations() throws IOException {
-        QueryProfiler profiler = new QueryProfiler(executor != null);
+        QueryProfiler profiler = executor != null
+            ?
new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()); searcher.count(query); From 41a12e28fff58c7262bfeb725c8a31e5ffa266f3 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Thu, 19 Oct 2023 20:33:32 -0700 Subject: [PATCH 050/178] Make RemoteStoreReplicationSource#getSegmentFiles asynchronous (#10725) * Make RemoteStoreReplicationSource#getSegmentFiles asynchronous Also make the remote store download process cancellable in case the replication event is canceled. Signed-off-by: Andrew Ross * Add ITs ensuring segRep targets are cleaned up on cancellation during metadata and segment fetch steps. Signed-off-by: Marc Handalian * Wrap metadata fetch in cancellableThreads.executeIO Signed-off-by: Marc Handalian * self review Signed-off-by: Marc Handalian * spotless Signed-off-by: Marc Handalian * Add missing node settings when bootstrapping nodes in tests. Signed-off-by: Marc Handalian --------- Signed-off-by: Andrew Ross Signed-off-by: Marc Handalian Co-authored-by: Andrew Ross --- ...emoteStoreMockRepositoryIntegTestCase.java | 5 + ...plicationUsingRemoteStoreDisruptionIT.java | 133 ++++++++++++++++++ .../store/RemoteStoreFileDownloader.java | 72 ++++++---- .../RemoteStoreReplicationSource.java | 68 +++++---- .../replication/common/ReplicationTarget.java | 11 +- .../store/RemoteStoreFileDownloaderTests.java | 125 ++++++++++++++-- .../AbstractSnapshotIntegTestCase.java | 6 + .../snapshots/mockstore/MockRepository.java | 15 ++ 8 files changed, 373 insertions(+), 62 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java index 2053800504c89..8166c0008ed83 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java @@ -114,6 +114,10 @@ protected void cleanupRepo() { } protected String setup(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure) { + return setup(repoLocation, ioFailureRate, skipExceptionBlobList, maxFailure, 0); + } + + protected String setup(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure, int replicaCount) { // The random_control_io_exception_rate setting ensures that 10-25% of all operations to remote store results in /// IOException. skip_exception_on_verification_file & skip_exception_on_list_blobs settings ensures that the // repository creation can happen without failure. 
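A note on the pattern this commit applies: the new asynchronous download API is wrapped by the existing blocking one, with CancellableThreads tying interruption of the waiting thread to cancellation of the in-flight work. A minimal sketch of that wrapper, using the same OpenSearch utilities that appear in the RemoteStoreFileDownloader diff below; the surrounding variable and method names are illustrative, not the literal implementation:

    final CancellableThreads cancellableThreads = new CancellableThreads();
    final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
    // Start the asynchronous, cancellable download
    downloadAsync(cancellableThreads, source, destination, toDownloadSegments, future);
    try {
        future.get(); // block until the async download completes or fails
    } catch (ExecutionException e) {
        throw new RuntimeException(e.getCause()); // surface the async failure to the caller
    } catch (InterruptedException e) {
        // The blocking wait was interrupted, so cancel the async work it was waiting on
        cancellableThreads.cancel(e.getMessage());
        Thread.currentThread().interrupt();
    }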
@@ -128,6 +132,7 @@ protected String setup(Path repoLocation, double ioFailureRate, String skipExcep internalCluster().startClusterManagerOnlyNode(settings.build()); String dataNodeName = internalCluster().startDataOnlyNode(settings.build()); + internalCluster().startDataOnlyNodes(replicaCount, settings.build()); createIndex(INDEX_NAME); logger.info("--> Created index={}", INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java new file mode 100644 index 0000000000000..b7b3f1d14f422 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.Index; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.SegmentReplicationState; +import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; +import java.util.Optional; +import java.util.Set; + +/** + * This class runs tests with remote store + segRep while blocking file downloads + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SegmentReplicationUsingRemoteStoreDisruptionIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { + + @Override + public Settings indexSettings() { + return remoteStoreIndexSettings(1); + } + + @Override + protected boolean addMockInternalEngine() { + return false; + } + + public void testCancelReplicationWhileSyncingSegments() throws Exception { + Path location = randomRepoPath().toAbsolutePath(); + setup(location, 0d, "metadata", Long.MAX_VALUE, 1); + + final Set dataNodeNames = internalCluster().getDataNodeNames(); + final String replicaNode = getNode(dataNodeNames, false); + final String primaryNode = getNode(dataNodeNames, true); + + SegmentReplicationTargetService targetService = internalCluster().getInstance(SegmentReplicationTargetService.class, replicaNode); + ensureGreen(INDEX_NAME); + blockNodeOnAnySegmentFile(REPOSITORY_NAME, replicaNode); + final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); + indexSingleDoc(); + refresh(INDEX_NAME); + waitForBlock(replicaNode, REPOSITORY_NAME, TimeValue.timeValueSeconds(10)); + final SegmentReplicationState state = targetService.getOngoingEventSegmentReplicationState(indexShard.shardId()); + assertEquals(SegmentReplicationState.Stage.GET_FILES, state.getStage()); + ReplicationCollection.ReplicationRef segmentReplicationTargetReplicationRef = targetService.get( + state.getReplicationId() + ); + final SegmentReplicationTarget segmentReplicationTarget = 
segmentReplicationTargetReplicationRef.get(); + // close the target ref here otherwise it will hold a refcount + segmentReplicationTargetReplicationRef.close(); + assertNotNull(segmentReplicationTarget); + assertTrue(segmentReplicationTarget.refCount() > 0); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + assertBusy(() -> { + assertTrue(indexShard.routingEntry().primary()); + assertNull(targetService.getOngoingEventSegmentReplicationState(indexShard.shardId())); + assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); + }); + unblockNode(REPOSITORY_NAME, replicaNode); + cleanupRepo(); + } + + public void testCancelReplicationWhileFetchingMetadata() throws Exception { + Path location = randomRepoPath().toAbsolutePath(); + setup(location, 0d, "metadata", Long.MAX_VALUE, 1); + + final Set dataNodeNames = internalCluster().getDataNodeNames(); + final String replicaNode = getNode(dataNodeNames, false); + final String primaryNode = getNode(dataNodeNames, true); + + SegmentReplicationTargetService targetService = internalCluster().getInstance(SegmentReplicationTargetService.class, replicaNode); + ensureGreen(INDEX_NAME); + blockNodeOnAnyFiles(REPOSITORY_NAME, replicaNode); + final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); + indexSingleDoc(); + refresh(INDEX_NAME); + waitForBlock(replicaNode, REPOSITORY_NAME, TimeValue.timeValueSeconds(10)); + final SegmentReplicationState state = targetService.getOngoingEventSegmentReplicationState(indexShard.shardId()); + assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, state.getStage()); + ReplicationCollection.ReplicationRef segmentReplicationTargetReplicationRef = targetService.get( + state.getReplicationId() + ); + final SegmentReplicationTarget segmentReplicationTarget = segmentReplicationTargetReplicationRef.get(); + // close the target ref here otherwise it will hold a refcount + segmentReplicationTargetReplicationRef.close(); + assertNotNull(segmentReplicationTarget); + assertTrue(segmentReplicationTarget.refCount() > 0); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + assertBusy(() -> { + assertTrue(indexShard.routingEntry().primary()); + assertNull(targetService.getOngoingEventSegmentReplicationState(indexShard.shardId())); + assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); + }); + unblockNode(REPOSITORY_NAME, replicaNode); + cleanupRepo(); + } + + private String getNode(Set dataNodeNames, boolean primary) { + assertEquals(2, dataNodeNames.size()); + for (String name : dataNodeNames) { + final IndexShard indexShard = getIndexShard(name, INDEX_NAME); + if (indexShard.routingEntry().primary() == primary) { + return name; + } + } + return null; + } + + private IndexShard getIndexShard(String node, String indexName) { + final Index index = resolveIndex(indexName); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); + IndexService indexService = indicesService.indexService(index); + assertNotNull(indexService); + final Optional shardId = indexService.shardIds().stream().findFirst(); + return shardId.map(indexService::getShard).orElse(null); + } +} diff --git a/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java index 4fc721f2b96b5..727c57afd289b 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java +++ 
b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java @@ -16,7 +16,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.logging.Loggers; -import org.opensearch.common.util.concurrent.UncategorizedExecutionException; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.recovery.RecoverySettings; @@ -51,9 +51,16 @@ public RemoteStoreFileDownloader(ShardId shardId, ThreadPool threadPool, Recover * @param source The remote directory to copy segment files from * @param destination The local directory to copy segment files to * @param toDownloadSegments The list of segment files to download + * @param listener Callback listener to be notified upon completion */ - public void download(Directory source, Directory destination, Collection toDownloadSegments) throws IOException { - downloadInternal(source, destination, null, toDownloadSegments, () -> {}); + public void downloadAsync( + CancellableThreads cancellableThreads, + Directory source, + Directory destination, + Collection toDownloadSegments, + ActionListener listener + ) { + downloadInternal(cancellableThreads, source, destination, null, toDownloadSegments, () -> {}, listener); } /** @@ -74,17 +81,37 @@ public void download( Directory secondDestination, Collection toDownloadSegments, Runnable onFileCompletion - ) throws IOException { - downloadInternal(source, destination, secondDestination, toDownloadSegments, onFileCompletion); + ) throws InterruptedException, IOException { + final CancellableThreads cancellableThreads = new CancellableThreads(); + final PlainActionFuture listener = PlainActionFuture.newFuture(); + downloadInternal(cancellableThreads, source, destination, secondDestination, toDownloadSegments, onFileCompletion, listener); + try { + listener.get(); + } catch (ExecutionException e) { + if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } else if (e.getCause() instanceof IOException) { + throw (IOException) e.getCause(); + } + throw new RuntimeException(e); + } catch (InterruptedException e) { + // If the blocking call on the PlainActionFuture itself is interrupted, then we must + // cancel the asynchronous work we were waiting on + cancellableThreads.cancel(e.getMessage()); + Thread.currentThread().interrupt(); + throw e; + } } private void downloadInternal( + CancellableThreads cancellableThreads, Directory source, Directory destination, @Nullable Directory secondDestination, Collection toDownloadSegments, - Runnable onFileCompletion - ) throws IOException { + Runnable onFileCompletion, + ActionListener listener + ) { final Queue queue = new ConcurrentLinkedQueue<>(toDownloadSegments); // Choose the minimum of: // - number of files to download @@ -95,25 +122,14 @@ private void downloadInternal( Math.min(threadPool.info(ThreadPool.Names.REMOTE_RECOVERY).getMax(), recoverySettings.getMaxConcurrentRemoteStoreStreams()) ); logger.trace("Starting download of {} files with {} threads", queue.size(), threads); - final PlainActionFuture> listener = PlainActionFuture.newFuture(); - final ActionListener allFilesListener = new GroupedActionListener<>(listener, threads); + final ActionListener allFilesListener = new GroupedActionListener<>(ActionListener.map(listener, r -> null), threads); for (int i = 0; i < threads; i++) { - copyOneFile(source, destination, 
secondDestination, queue, onFileCompletion, allFilesListener); - } - try { - listener.actionGet(); - } catch (UncategorizedExecutionException e) { - // Any IOException will be double-wrapped so dig it out and throw it - if (e.getCause() instanceof ExecutionException) { - if (e.getCause().getCause() instanceof IOException) { - throw (IOException) e.getCause().getCause(); - } - } - throw e; + copyOneFile(cancellableThreads, source, destination, secondDestination, queue, onFileCompletion, allFilesListener); } } private void copyOneFile( + CancellableThreads cancellableThreads, Directory source, Directory destination, @Nullable Directory secondDestination, @@ -129,18 +145,20 @@ private void copyOneFile( threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY).submit(() -> { logger.trace("Downloading file {}", file); try { - destination.copyFrom(source, file, file, IOContext.DEFAULT); - onFileCompletion.run(); - if (secondDestination != null) { - secondDestination.copyFrom(destination, file, file, IOContext.DEFAULT); - } + cancellableThreads.executeIO(() -> { + destination.copyFrom(source, file, file, IOContext.DEFAULT); + onFileCompletion.run(); + if (secondDestination != null) { + secondDestination.copyFrom(destination, file, file, IOContext.DEFAULT); + } + }); } catch (Exception e) { // Clear the queue to stop any future processing, report the failure, then return queue.clear(); listener.onFailure(e); return; } - copyOneFile(source, destination, secondDestination, queue, onFileCompletion, listener); + copyOneFile(cancellableThreads, source, destination, secondDestination, queue, onFileCompletion, listener); }); } } diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java index 12eabf1e6554f..b06b3e0497cf7 100644 --- a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java @@ -15,6 +15,7 @@ import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.util.Version; import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; @@ -24,11 +25,14 @@ import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.stream.Collectors; @@ -43,6 +47,7 @@ public class RemoteStoreReplicationSource implements SegmentReplicationSource { private final IndexShard indexShard; private final RemoteSegmentStoreDirectory remoteDirectory; + private final CancellableThreads cancellableThreads = new CancellableThreads(); public RemoteStoreReplicationSource(IndexShard indexShard) { this.indexShard = indexShard; @@ -61,7 +66,7 @@ public void getCheckpointMetadata( // TODO: Need to figure out a way to pass this information for segment metadata via remote store. 
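        // Note: the SegmentInfos snapshot below is only used to read the commit's Lucene version;
        // the checkpoint metadata itself comes from the remote segment metadata file.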
try (final GatedCloseable segmentInfosSnapshot = indexShard.getSegmentInfosSnapshot()) { final Version version = segmentInfosSnapshot.get().getCommitLuceneVersion(); - RemoteSegmentMetadata mdFile = remoteDirectory.init(); + final RemoteSegmentMetadata mdFile = getRemoteSegmentMetadata(); // During initial recovery flow, the remote store might not // have metadata as primary hasn't uploaded anything yet. if (mdFile == null && indexShard.state().equals(IndexShardState.STARTED) == false) { @@ -106,39 +111,50 @@ public void getSegmentFiles( } logger.debug("Downloading segment files from remote store {}", filesToFetch); - RemoteSegmentMetadata remoteSegmentMetadata = remoteDirectory.readLatestMetadataFile(); - Collection directoryFiles = List.of(indexShard.store().directory().listAll()); - if (remoteSegmentMetadata != null) { - try { - indexShard.store().incRef(); - indexShard.remoteStore().incRef(); - final Directory storeDirectory = indexShard.store().directory(); - final List toDownloadSegmentNames = new ArrayList<>(); - for (StoreFileMetadata fileMetadata : filesToFetch) { - String file = fileMetadata.name(); - assert directoryFiles.contains(file) == false : "Local store already contains the file " + file; - toDownloadSegmentNames.add(file); - } - indexShard.getFileDownloader() - .download( - remoteDirectory, - new ReplicationStatsDirectoryWrapper(storeDirectory, fileProgressTracker), - toDownloadSegmentNames - ); - logger.debug("Downloaded segment files from remote store {}", filesToFetch); - } finally { - indexShard.store().decRef(); - indexShard.remoteStore().decRef(); + if (remoteMetadataExists()) { + final Directory storeDirectory = indexShard.store().directory(); + final Collection directoryFiles = List.of(storeDirectory.listAll()); + final List toDownloadSegmentNames = new ArrayList<>(); + for (StoreFileMetadata fileMetadata : filesToFetch) { + String file = fileMetadata.name(); + assert directoryFiles.contains(file) == false : "Local store already contains the file " + file; + toDownloadSegmentNames.add(file); } + indexShard.getFileDownloader() + .downloadAsync( + cancellableThreads, + remoteDirectory, + new ReplicationStatsDirectoryWrapper(storeDirectory, fileProgressTracker), + toDownloadSegmentNames, + ActionListener.map(listener, r -> new GetSegmentFilesResponse(filesToFetch)) + ); + } else { + listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); } - listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); - } catch (Exception e) { + } catch (IOException | RuntimeException e) { listener.onFailure(e); } } + @Override + public void cancel() { + this.cancellableThreads.cancel("Canceled by target"); + } + @Override public String getDescription() { return "RemoteStoreReplicationSource"; } + + private boolean remoteMetadataExists() throws IOException { + final AtomicBoolean metadataExists = new AtomicBoolean(false); + cancellableThreads.executeIO(() -> metadataExists.set(remoteDirectory.readLatestMetadataFile() != null)); + return metadataExists.get(); + } + + private RemoteSegmentMetadata getRemoteSegmentMetadata() throws IOException { + AtomicReference mdFile = new AtomicReference<>(); + cancellableThreads.executeIO(() -> mdFile.set(remoteDirectory.init())); + return mdFile.get(); + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java index ec6b4d06b32c3..aac59df4f6573 100644 --- 
a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java @@ -91,6 +91,9 @@ public ReplicationTarget(String name, IndexShard indexShard, ReplicationLuceneIn // make sure the store is not released until we are done. this.cancellableThreads = new CancellableThreads(); store.incRef(); + if (indexShard.indexSettings().isRemoteStoreEnabled()) { + indexShard.remoteStore().incRef(); + } } public long getId() { @@ -278,6 +281,12 @@ public abstract void writeFileChunk( ); protected void closeInternal() { - store.decRef(); + try { + store.decRef(); + } finally { + if (indexShard.indexSettings().isRemoteStoreEnabled()) { + indexShard.remoteStore().decRef(); + } + } } } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java b/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java index 588d9e8bb13a2..6d8b3fe4d69fb 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java @@ -9,12 +9,18 @@ package org.opensearch.index.store; import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.NIOFSDirectory; +import org.opensearch.OpenSearchTimeoutException; +import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.test.OpenSearchTestCase; @@ -31,8 +37,10 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; public class RemoteStoreFileDownloaderTests extends OpenSearchTestCase { @@ -76,31 +84,132 @@ public void stopThreadPool() throws Exception { } public void testDownload() throws IOException { - fileDownloader.download(source, destination, files.keySet()); + final PlainActionFuture l = new PlainActionFuture<>(); + fileDownloader.downloadAsync(new CancellableThreads(), source, destination, files.keySet(), l); + l.actionGet(); assertContent(files, destination); } - public void testDownloadWithSecondDestination() throws IOException { + public void testDownloadWithSecondDestination() throws IOException, InterruptedException { fileDownloader.download(source, destination, secondDestination, files.keySet(), () -> {}); assertContent(files, destination); assertContent(files, secondDestination); } - public void testDownloadWithFileCompletionHandler() throws IOException { + public void testDownloadWithFileCompletionHandler() throws IOException, InterruptedException { final AtomicInteger counter = new AtomicInteger(0); fileDownloader.download(source, destination, null, files.keySet(), counter::incrementAndGet); assertContent(files, destination); assertEquals(files.size(), counter.get()); } - public void testDownloadNonExistentFile() { - 
assertThrows(NoSuchFileException.class, () -> fileDownloader.download(source, destination, Set.of("not real"))); + public void testDownloadNonExistentFile() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + fileDownloader.downloadAsync(new CancellableThreads(), source, destination, Set.of("not real"), new ActionListener<>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) { + assertEquals(NoSuchFileException.class, e.getClass()); + latch.countDown(); + } + }); + assertTrue(latch.await(10, TimeUnit.SECONDS)); } - public void testDownloadExtraNonExistentFile() { - List filesWithExtra = new ArrayList<>(files.keySet()); + public void testDownloadExtraNonExistentFile() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + final List filesWithExtra = new ArrayList<>(files.keySet()); filesWithExtra.add("not real"); - assertThrows(NoSuchFileException.class, () -> fileDownloader.download(source, destination, filesWithExtra)); + fileDownloader.downloadAsync(new CancellableThreads(), source, destination, filesWithExtra, new ActionListener<>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) { + assertEquals(NoSuchFileException.class, e.getClass()); + latch.countDown(); + } + }); + assertTrue(latch.await(10, TimeUnit.SECONDS)); + } + + public void testCancellable() { + final CancellableThreads cancellableThreads = new CancellableThreads(); + final PlainActionFuture blockingListener = new PlainActionFuture<>(); + final Directory blockingDestination = new FilterDirectory(destination) { + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) { + try { + Thread.sleep(60_000); // Will be interrupted + fail("Expected to be interrupted"); + } catch (InterruptedException e) { + throw new RuntimeException("Failed due to interrupt", e); + } + } + }; + fileDownloader.downloadAsync(cancellableThreads, source, blockingDestination, files.keySet(), blockingListener); + assertThrows( + "Expected to timeout due to blocking directory", + OpenSearchTimeoutException.class, + () -> blockingListener.actionGet(TimeValue.timeValueMillis(500)) + ); + cancellableThreads.cancel("test"); + assertThrows( + "Expected to complete with cancellation failure", + CancellableThreads.ExecutionCancelledException.class, + blockingListener::actionGet + ); + } + + public void testBlockingCallCanBeInterrupted() throws Exception { + final Directory blockingDestination = new FilterDirectory(destination) { + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) { + try { + Thread.sleep(60_000); // Will be interrupted + fail("Expected to be interrupted"); + } catch (InterruptedException e) { + throw new RuntimeException("Failed due to interrupt", e); + } + } + }; + final AtomicReference capturedException = new AtomicReference<>(); + final Thread thread = new Thread(() -> { + try { + fileDownloader.download(source, blockingDestination, null, files.keySet(), () -> {}); + } catch (Exception e) { + capturedException.set(e); + } + }); + thread.start(); + thread.interrupt(); + thread.join(); + assertEquals(InterruptedException.class, capturedException.get().getClass()); + } + + public void testIOException() throws IOException, InterruptedException { + final Directory failureDirectory = new FilterDirectory(destination) { + @Override + public void copyFrom(Directory from, String src, String dest, 
IOContext context) throws IOException { + throw new IOException("test"); + } + }; + assertThrows(IOException.class, () -> fileDownloader.download(source, failureDirectory, null, files.keySet(), () -> {})); + + final CountDownLatch latch = new CountDownLatch(1); + fileDownloader.downloadAsync(new CancellableThreads(), source, failureDirectory, files.keySet(), new ActionListener<>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) { + assertEquals(IOException.class, e.getClass()); + latch.countDown(); + } + }); + assertTrue(latch.await(10, TimeUnit.SECONDS)); } private static void assertContent(Map expected, Directory destination) throws IOException { diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index 1bb1e44a8a600..0ee889af5ce1a 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -331,6 +331,12 @@ public static void blockNodeOnAnyFiles(String repository, String nodeName) { ); } + public static void blockNodeOnAnySegmentFile(String repository, String nodeName) { + ((MockRepository) internalCluster().getInstance(RepositoriesService.class, nodeName).repository(repository)).blockOnSegmentFiles( + true + ); + } + public static void blockDataNode(String repository, String nodeName) { ((MockRepository) internalCluster().getInstance(RepositoriesService.class, nodeName).repository(repository)).blockOnDataFiles(true); } diff --git a/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java b/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java index 7db71c4be0968..72c4ba44d0a31 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java @@ -139,6 +139,8 @@ public long getFailureCount() { private volatile boolean blockOnDataFiles; + private volatile boolean blockOnSegmentFiles; + private volatile boolean blockOnDeleteIndexN; /** @@ -190,6 +192,7 @@ public MockRepository( maximumNumberOfFailures = metadata.settings().getAsLong("max_failure_number", 100L); blockOnAnyFiles = metadata.settings().getAsBoolean("block_on_control", false); blockOnDataFiles = metadata.settings().getAsBoolean("block_on_data", false); + blockOnSegmentFiles = metadata.settings().getAsBoolean("block_on_segment", false); blockAndFailOnWriteSnapFile = metadata.settings().getAsBoolean("block_on_snap", false); randomPrefix = metadata.settings().get("random", "default"); waitAfterUnblock = metadata.settings().getAsLong("wait_after_unblock", 0L); @@ -237,6 +240,7 @@ public synchronized void unblock() { blocked = false; // Clean blocking flags, so we wouldn't try to block again blockOnDataFiles = false; + blockOnSegmentFiles = false; blockOnAnyFiles = false; blockAndFailOnWriteIndexFile = false; blockOnWriteIndexFile = false; @@ -259,6 +263,14 @@ public void setBlockOnAnyFiles(boolean blocked) { blockOnAnyFiles = blocked; } + public void blockOnSegmentFiles(boolean blocked) { + blockOnSegmentFiles = blocked; + } + + public void setBlockOnSegmentFiles(boolean blocked) { + blockOnSegmentFiles = blocked; + } + public void setBlockAndFailOnWriteSnapFiles(boolean blocked) { blockAndFailOnWriteSnapFile = blocked; } 
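The new blockOnSegmentFiles switch mirrors the existing blockOnDataFiles one: a test arms the flag, any repository thread that touches a matching blob parks inside blockExecution(), and unblocking clears every flag at once. A minimal sketch of how an integration test might drive it, using the blockNodeOnAnySegmentFile helper added above together with the existing waitForBlock/unblockNode helpers from the same test base class (the repository and node names are placeholders, and the triggering operation is elided):

    blockNodeOnAnySegmentFile("test-repo", dataNodeName);   // arm blockOnSegmentFiles
    // ... start an operation that writes segment blobs, e.g. a snapshot or restore ...
    waitForBlock(dataNodeName, "test-repo", TimeValue.timeValueSeconds(30));
    // ... assert on the paused state while the repository thread is parked ...
    unblockNode("test-repo", dataNodeName);                  // clears all blocking flags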
@@ -306,6 +318,7 @@ private synchronized boolean blockExecution() { boolean wasBlocked = false; try { while (blockOnDataFiles + || blockOnSegmentFiles || blockOnAnyFiles || blockAndFailOnWriteIndexFile || blockOnWriteIndexFile @@ -407,6 +420,8 @@ private void maybeIOExceptionOrBlock(String blobName) throws IOException { blockExecutionAndMaybeWait(blobName); } else if (blobName.startsWith("snap-") && blockAndFailOnWriteSnapFile) { blockExecutionAndFail(blobName); + } else if (blockOnSegmentFiles && blobName.contains(".si__")) { + blockExecutionAndMaybeWait(blobName); } } } From c400d84f0e884217454ddfcc1503d02e0b280fa9 Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Fri, 20 Oct 2023 17:39:58 +0530 Subject: [PATCH 051/178] [Remote State] fix lock release before deletion is completed (#10611) * fix lock release before deletion is completed Signed-off-by: bansvaru --- .../remote/RemoteClusterStateService.java | 8 +++-- .../RemoteClusterStateServiceTests.java | 34 +++++++++++++++++++ 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index b9d06c8fbb1c1..96ce2fc779ea0 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -1072,7 +1072,8 @@ public void onFailure(Exception e) { * @param clusterUUID uuid of cluster state to refer to in remote * @param manifestsToRetain no of latest manifest files to keep in remote */ - private void deleteStaleClusterMetadata(String clusterName, String clusterUUID, int manifestsToRetain) { + // package private for testing + void deleteStaleClusterMetadata(String clusterName, String clusterUUID, int manifestsToRetain) { if (deleteStaleMetadataRunning.compareAndSet(false, true) == false) { logger.info("Delete stale cluster metadata task is already in progress."); return; @@ -1109,8 +1110,9 @@ public void onFailure(Exception e) { } } ); - } finally { + } catch (Exception e) { deleteStaleMetadataRunning.set(false); + throw e; } } @@ -1190,7 +1192,7 @@ private void deleteStalePaths(String clusterName, String clusterUUID, List { String clusterName = clusterState.getClusterName().value(); - logger.info("Deleting stale cluster UUIDs data from remote [{}]", clusterName); + logger.debug("Deleting stale cluster UUIDs data from remote [{}]", clusterName); Set allClustersUUIDsInRemote; try { allClustersUUIDsInRemote = new HashSet<>(getAllClusterUUIDs(clusterState.getClusterName().value())); diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 49b7f0ff8d1a9..433eac63e9580 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -62,6 +62,9 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; import org.mockito.ArgumentCaptor; @@ -73,6 +76,7 @@ import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_CURRENT_CODEC_VERSION; import static 
org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_FILE_PREFIX; import static org.opensearch.gateway.remote.RemoteClusterStateService.METADATA_FILE_PREFIX; +import static org.opensearch.gateway.remote.RemoteClusterStateService.RETAINED_MANIFESTS; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; @@ -1004,6 +1008,36 @@ public void testFileNames() { assertThat(splittedName[3], is("P")); } + public void testSingleConcurrentExecutionOfStaleManifestCleanup() throws Exception { + BlobContainer blobContainer = mock(BlobContainer.class); + BlobPath blobPath = new BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + + CountDownLatch latch = new CountDownLatch(1); + AtomicInteger callCount = new AtomicInteger(0); + doAnswer(invocation -> { + callCount.incrementAndGet(); + if (latch.await(5000, TimeUnit.SECONDS) == false) { + throw new Exception("Timed out waiting for delete task queuing to complete"); + } + return null; + }).when(blobContainer) + .listBlobsByPrefixInSortedOrder( + any(String.class), + any(int.class), + any(BlobContainer.BlobNameSortOrder.class), + any(ActionListener.class) + ); + + remoteClusterStateService.start(); + remoteClusterStateService.deleteStaleClusterMetadata("cluster-name", "cluster-uuid", RETAINED_MANIFESTS); + remoteClusterStateService.deleteStaleClusterMetadata("cluster-name", "cluster-uuid", RETAINED_MANIFESTS); + + latch.countDown(); + assertBusy(() -> assertEquals(1, callCount.get())); + } + private void mockObjectsForGettingPreviousClusterUUID(Map clusterUUIDsPointers) throws IOException { final BlobPath blobPath = mock(BlobPath.class); when((blobStoreRepository.basePath())).thenReturn(blobPath); From 6641ef8a0a39d86aadd272936cae25a406eb3485 Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Fri, 20 Oct 2023 18:06:40 +0530 Subject: [PATCH 052/178] Restore remote index shards with ExistingStoreRecoverySource after restore from remote state (#10665) * Restore remote index shards with ExistingStoreRecoverySource after restore from remote state Signed-off-by: bansvaru --- .../remotestore/BaseRemoteStoreRestoreIT.java | 5 +- .../RemoteStoreClusterStateRestoreIT.java | 28 ++- .../cluster/routing/IndexRoutingTable.java | 4 +- .../gateway/ClusterStateUpdaters.java | 17 +- .../recovery/RemoteStoreRestoreService.java | 30 +-- .../gateway/ClusterStateUpdatersTests.java | 237 ++---------------- 6 files changed, 53 insertions(+), 268 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java index b8481610869e6..99c5d7fb2bae7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java @@ -46,7 +46,10 @@ protected Collection> nodePlugins() { } protected void restore(String... indices) { - boolean restoreAllShards = randomBoolean(); + restore(randomBoolean(), indices); + } + + protected void restore(boolean restoreAllShards, String... 
indices) { if (restoreAllShards) { assertAcked(client().admin().indices().prepareClose(indices)); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index 3a3e293de9b13..c2cb7cc60f152 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -65,6 +65,13 @@ private void resetCluster(int dataNodeCount, int clusterManagerNodeCount) { internalCluster().startDataOnlyNodes(dataNodeCount); } + protected void verifyRedIndicesAndTriggerRestore(Map indexStats, String indexName, boolean indexMoreDocs) + throws Exception { + ensureRed(indexName); + restore(false, indexName); + verifyRestoredData(indexStats, indexName, indexMoreDocs); + } + public void testFullClusterRestore() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; @@ -83,7 +90,7 @@ public void testFullClusterRestore() throws Exception { // Step - 3 Trigger full cluster restore and validate validateMetadata(List.of(INDEX_NAME)); - verifyRestoredData(indexStats, INDEX_NAME); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); } public void testFullClusterRestoreMultipleIndices() throws Exception { @@ -112,8 +119,8 @@ public void testFullClusterRestoreMultipleIndices() throws Exception { // Step - 3 Trigger full cluster restore validateMetadata(List.of(INDEX_NAME, secondIndexName)); - verifyRestoredData(indexStats, INDEX_NAME); - verifyRestoredData(indexStats2, secondIndexName, false); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, false); + verifyRedIndicesAndTriggerRestore(indexStats2, secondIndexName, false); assertTrue(INDEX_READ_ONLY_SETTING.get(clusterService().state().metadata().index(secondIndexName).getSettings())); assertThrows(ClusterBlockException.class, () -> indexSingleDoc(secondIndexName)); // Test is complete @@ -181,7 +188,7 @@ public void testRemoteStateFullRestart() throws Exception { String newClusterUUID = clusterService().state().metadata().clusterUUID(); assert Objects.equals(newClusterUUID, prevClusterUUID) : "Full restart not successful. 
cluster uuid has changed"; validateCurrentMetadata(); - verifyRestoredData(indexStats, INDEX_NAME); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); } private void validateMetadata(List indexNames) { @@ -246,19 +253,18 @@ public void testFullClusterRestoreGlobalMetadata() throws Exception { // Step - 3 Trigger full cluster restore and validate // validateCurrentMetadata(); - verifyRestoredData(indexStats, INDEX_NAME, false); - - // validate global metadata restored - verifyRestoredRepositories(); - verifyRestoredIndexTemplate(); assertEquals(Integer.valueOf(34), SETTING_CLUSTER_MAX_SHARDS_PER_NODE.get(clusterService().state().metadata().settings())); assertEquals(true, SETTING_READ_ONLY_SETTING.get(clusterService().state().metadata().settings())); assertTrue(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK)); - // Test is complete - // Remote the cluster read only block to ensure proper cleanup updatePersistentSettings(Settings.builder().put(SETTING_READ_ONLY_SETTING.getKey(), false).build()); assertFalse(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK)); + + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, false); + + // validate global metadata restored + verifyRestoredRepositories(); + verifyRestoredIndexTemplate(); } private void registerCustomRepository() { diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java index b12698c8a320e..d77d44580798a 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java @@ -466,12 +466,12 @@ public Builder initializeAsRemoteStoreRestore( } for (int shardNumber = 0; shardNumber < indexMetadata.getNumberOfShards(); shardNumber++) { ShardId shardId = new ShardId(index, shardNumber); - if (forceRecoverAllPrimaries == false && indexShardRoutingTableMap.containsKey(shardId) == false) { + if (indexShardRoutingTableMap.containsKey(shardId) == false) { throw new IllegalStateException("IndexShardRoutingTable is not present for shardId: " + shardId); } IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); IndexShardRoutingTable indexShardRoutingTable = indexShardRoutingTableMap.get(shardId); - if (forceRecoverAllPrimaries || indexShardRoutingTable == null || indexShardRoutingTable.primaryShard().unassigned()) { + if (forceRecoverAllPrimaries || indexShardRoutingTable.primaryShard().unassigned()) { // Primary shard to be recovered from remote store. indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, true, recoverySource, unassignedInfo)); // All the replica shards to be recovered from peer recovery. 
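After this change, initializeAsRemoteStoreRestore requires an IndexShardRoutingTable entry for every shard id and re-seeds a primary from the remote store only when the caller forces it or the primary is unassigned; a missing entry now fails fast instead of being treated as a shard to recover. The per-shard decision reduces to roughly this shape (a paraphrase, not the literal method body):

    IndexShardRoutingTable existing = indexShardRoutingTableMap.get(shardId);
    if (existing == null) {
        throw new IllegalStateException("IndexShardRoutingTable is not present for shardId: " + shardId);
    }
    if (forceRecoverAllPrimaries || existing.primaryShard().unassigned()) {
        // primary gets a new unassigned entry with RemoteStoreRecoverySource;
        // replicas are added as unassigned for peer recovery
    } else {
        // the existing shard routing entries are carried over unchanged
    }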
diff --git a/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java b/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java index 4c562b348f141..1563ac84bdd1c 100644 --- a/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java +++ b/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java @@ -41,7 +41,6 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.common.settings.ClusterSettings; @@ -121,21 +120,7 @@ static ClusterState updateRoutingTable(final ClusterState state) { // initialize all index routing tables as empty final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(state.routingTable()); for (final IndexMetadata cursor : state.metadata().indices().values()) { - // Whether IndexMetadata is recovered from local disk or remote it doesn't matter to us at this point. - // We are only concerned about index data recovery here. Which is why we only check for remote store enabled and not for remote - // cluster state enabled. - if (cursor.getSettings().getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false) == false - || state.routingTable().hasIndex(cursor.getIndex()) == false - || state.routingTable() - .index(cursor.getIndex()) - .shardsMatchingPredicateCount( - shardRouting -> shardRouting.primary() - // We need to ensure atleast one of the primaries is being recovered from remote. - // This ensures we have gone through the RemoteStoreRestoreService and routing table is updated - && shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource - ) == 0) { - routingTableBuilder.addAsRecovery(cursor); - } + routingTableBuilder.addAsRecovery(cursor); } // start with 0 based versions for routing table routingTableBuilder.version(0); diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java index ac9cf35d1d8e5..6692d521b8f65 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -183,6 +183,7 @@ private RemoteRestoreResult executeRestore( final String restoreUUID = UUIDs.randomBase64UUID(); List indicesToBeRestored = new ArrayList<>(); int totalShards = 0; + boolean metadataFromRemoteStore = false; ClusterState.Builder builder = ClusterState.builder(currentState); Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); @@ -190,7 +191,7 @@ private RemoteRestoreResult executeRestore( for (Map.Entry> indexMetadataEntry : indexMetadataMap.entrySet()) { String indexName = indexMetadataEntry.getKey(); IndexMetadata indexMetadata = indexMetadataEntry.getValue().v2(); - boolean metadataFromRemoteStore = indexMetadataEntry.getValue().v1(); + metadataFromRemoteStore = indexMetadataEntry.getValue().v1(); IndexMetadata updatedIndexMetadata = indexMetadata; if (metadataFromRemoteStore == false && restoreAllShards) { updatedIndexMetadata = IndexMetadata.builder(indexMetadata) @@ -204,27 +205,23 @@ private RemoteRestoreResult executeRestore( IndexId indexId = new IndexId(indexName, updatedIndexMetadata.getIndexUUID()); - Map 
indexShardRoutingTableMap = new HashMap<>(); if (metadataFromRemoteStore == false) { - indexShardRoutingTableMap = currentState.routingTable() + Map indexShardRoutingTableMap = currentState.routingTable() .index(indexName) .shards() .values() .stream() .collect(Collectors.toMap(IndexShardRoutingTable::shardId, Function.identity())); + + RecoverySource.RemoteStoreRecoverySource recoverySource = new RecoverySource.RemoteStoreRecoverySource( + restoreUUID, + updatedIndexMetadata.getCreationVersion(), + indexId + ); + + rtBuilder.addAsRemoteStoreRestore(updatedIndexMetadata, recoverySource, indexShardRoutingTableMap, restoreAllShards); } - RecoverySource.RemoteStoreRecoverySource recoverySource = new RecoverySource.RemoteStoreRecoverySource( - restoreUUID, - updatedIndexMetadata.getCreationVersion(), - indexId - ); - rtBuilder.addAsRemoteStoreRestore( - updatedIndexMetadata, - recoverySource, - indexShardRoutingTableMap, - restoreAllShards || metadataFromRemoteStore - ); blocks.updateBlocks(updatedIndexMetadata); mdBuilder.put(updatedIndexMetadata, true); indicesToBeRestored.add(indexName); @@ -239,7 +236,10 @@ private RemoteRestoreResult executeRestore( RoutingTable rt = rtBuilder.build(); ClusterState updatedState = builder.metadata(mdBuilder).blocks(blocks).routingTable(rt).build(); - return RemoteRestoreResult.build(restoreUUID, restoreInfo, allocationService.reroute(updatedState, "restored from remote store")); + if (metadataFromRemoteStore == false) { + updatedState = allocationService.reroute(updatedState, "restored from remote store"); + } + return RemoteRestoreResult.build(restoreUUID, restoreInfo, updatedState); } private void restoreGlobalMetadata(Metadata.Builder mdBuilder, Metadata remoteMetadata) { diff --git a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java index 9b3fd45245ef7..1c43bb565ef69 100644 --- a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java +++ b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java @@ -41,6 +41,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.UnassignedInfo; @@ -52,12 +53,14 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.repositories.IndexId; import org.opensearch.test.OpenSearchTestCase; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.Map; import java.util.Set; import java.util.function.BiConsumer; import java.util.function.Function; @@ -275,7 +278,7 @@ public void testUpdateRoutingTable() { } } - public void testSkipRoutingTableUpdateWhenRemoteRecovery() { + public void testRoutingTableUpdateWhenRemoteStateRecovery() { final int numOfShards = randomIntBetween(1, 10); final IndexMetadata remoteMetadata = createIndexMetadata( @@ -286,7 +289,7 @@ public void testSkipRoutingTableUpdateWhenRemoteRecovery() { .build() ); - // Test remote index routing table is generated with ExistingStoreRecoverySource if no routing table is present + // Test remote index routing table is generated 
with ExistingStoreRecoverySource { final Index index = remoteMetadata.getIndex(); final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) @@ -322,48 +325,14 @@ public void testSkipRoutingTableUpdateWhenRemoteRecovery() { } - // Test remote index routing table is overridden if recovery source is not RemoteStoreRecoverySource + // Test remote index routing table is overridden if recovery source is RemoteStoreRecoverySource { - IndexRoutingTable.Builder remoteBuilderWithoutRemoteRecovery = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) - .initializeAsNew(remoteMetadata); final Index index = remoteMetadata.getIndex(); - final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) - .metadata(Metadata.builder().put(remoteMetadata, false).build()) - .routingTable(new RoutingTable.Builder().add(remoteBuilderWithoutRemoteRecovery.build()).build()) - .build(); - assertTrue(initialState.routingTable().hasIndex(index)); - final ClusterState newState = updateRoutingTable(initialState); - IndexRoutingTable newRemoteIndexRoutingTable = newState.routingTable().index(remoteMetadata.getIndex()); - assertTrue(newState.routingTable().hasIndex(index)); - assertEquals( - 0, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.INDEX_CREATED) - ) - ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) - ) - ); - assertEquals( - 0, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource - ) - ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource - ) - ); - - } - - // Test routing table update is skipped for a remote index - { + Map routingTableMap = new HashMap<>(); + for (int shardNumber = 0; shardNumber < remoteMetadata.getNumberOfShards(); shardNumber++) { + ShardId shardId = new ShardId(index, shardNumber); + routingTableMap.put(shardId, new IndexShardRoutingTable.Builder(new ShardId(remoteMetadata.getIndex(), 1)).build()); + } IndexRoutingTable.Builder remoteBuilderWithRemoteRecovery = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) .initializeAsRemoteStoreRestore( remoteMetadata, @@ -372,10 +341,9 @@ public void testSkipRoutingTableUpdateWhenRemoteRecovery() { remoteMetadata.getCreationVersion(), new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID()) ), - new HashMap<>(), + routingTableMap, true ); - final Index index = remoteMetadata.getIndex(); final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) .metadata(Metadata.builder().put(remoteMetadata, false).build()) .routingTable(new RoutingTable.Builder().add(remoteBuilderWithRemoteRecovery.build()).build()) @@ -387,205 +355,28 @@ public void testSkipRoutingTableUpdateWhenRemoteRecovery() { assertEquals( 0, newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) - ) - ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> 
shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED) - ) - ); - assertEquals( - 0, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource - ) - ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource - ) - ); - - } - - // Test reset routing table for 2 indices - one remote and one non remote. - // Routing table for non remote index should be updated and remote index routing table should remain intact - { - final IndexMetadata nonRemoteMetadata = createIndexMetadata( - "test-nonremote", - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards).build() - ); - IndexRoutingTable.Builder remoteBuilderWithRemoteRecovery = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) - .initializeAsRemoteStoreRestore( - remoteMetadata, - new RecoverySource.RemoteStoreRecoverySource( - UUIDs.randomBase64UUID(), - remoteMetadata.getCreationVersion(), - new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID()) - ), - new HashMap<>(), - true - ); - IndexRoutingTable.Builder nonRemoteBuilderWithoutRemoteRecovery = new IndexRoutingTable.Builder(nonRemoteMetadata.getIndex()) - .initializeAsNew(nonRemoteMetadata); - final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) - .metadata(Metadata.builder().put(remoteMetadata, false).build()) - .metadata(Metadata.builder().put(nonRemoteMetadata, false).build()) - .routingTable( - new RoutingTable.Builder().add(remoteBuilderWithRemoteRecovery.build()) - .add(nonRemoteBuilderWithoutRemoteRecovery.build()) - .build() - ) - .build(); - assertTrue(initialState.routingTable().hasIndex(remoteMetadata.getIndex())); - assertTrue(initialState.routingTable().hasIndex(nonRemoteMetadata.getIndex())); - final ClusterState newState = updateRoutingTable(initialState); - assertTrue(newState.routingTable().hasIndex(remoteMetadata.getIndex())); - assertTrue(newState.routingTable().hasIndex(nonRemoteMetadata.getIndex())); - IndexRoutingTable newRemoteIndexRoutingTable = newState.routingTable().index(remoteMetadata.getIndex()); - IndexRoutingTable newNonRemoteIndexRoutingTable = newState.routingTable().index(nonRemoteMetadata.getIndex()); - assertEquals( - 0, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) - ) - ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED) - ) - ); - assertEquals( - 0, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource - ) - ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource - ) - ); - assertEquals( - 0, - newNonRemoteIndexRoutingTable.shardsMatchingPredicateCount( shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.INDEX_CREATED) ) ); assertEquals( numOfShards, - newNonRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> 
shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) - ) - ); - assertEquals( - 0, - newNonRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource - ) - ); - assertEquals( - numOfShards, - newNonRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource - ) - ); - } - - // Test reset routing table for 2 indices, both remote backed but only once index has RemoteStoreRecoverySource. - // Routing table for only remote index without RemoteStoreRecoverySource should be updated - { - final IndexMetadata remoteWithoutRemoteRecoveryMetadata = createIndexMetadata( - "test-remote-without-recovery", - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .build() - ); - IndexRoutingTable.Builder remoteBuilderWithRemoteRecovery = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) - .initializeAsRemoteStoreRestore( - remoteMetadata, - new RecoverySource.RemoteStoreRecoverySource( - UUIDs.randomBase64UUID(), - remoteMetadata.getCreationVersion(), - new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID()) - ), - new HashMap<>(), - true - ); - IndexRoutingTable.Builder remoteBuilderWithoutRemoteRecovery = new IndexRoutingTable.Builder( - remoteWithoutRemoteRecoveryMetadata.getIndex() - ).initializeAsNew(remoteWithoutRemoteRecoveryMetadata); - final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) - .metadata(Metadata.builder().put(remoteMetadata, false).build()) - .metadata(Metadata.builder().put(remoteWithoutRemoteRecoveryMetadata, false).build()) - .routingTable( - new RoutingTable.Builder().add(remoteBuilderWithRemoteRecovery.build()) - .add(remoteBuilderWithoutRemoteRecovery.build()) - .build() - ) - .build(); - assertTrue(initialState.routingTable().hasIndex(remoteMetadata.getIndex())); - assertTrue(initialState.routingTable().hasIndex(remoteWithoutRemoteRecoveryMetadata.getIndex())); - final ClusterState newState = updateRoutingTable(initialState); - assertTrue(newState.routingTable().hasIndex(remoteMetadata.getIndex())); - assertTrue(newState.routingTable().hasIndex(remoteWithoutRemoteRecoveryMetadata.getIndex())); - IndexRoutingTable newRemoteIndexRoutingTable = newState.routingTable().index(remoteMetadata.getIndex()); - IndexRoutingTable newRemoteWithoutRemoteRecoveryIndexRoutingTable = newState.routingTable() - .index(remoteWithoutRemoteRecoveryMetadata.getIndex()); - assertEquals( - 0, newRemoteIndexRoutingTable.shardsMatchingPredicateCount( shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) ) ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED) - ) - ); assertEquals( 0, newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource - ) - ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource - ) - ); - assertEquals( - 0, - newRemoteWithoutRemoteRecoveryIndexRoutingTable.shardsMatchingPredicateCount( - 
shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.INDEX_CREATED) - ) - ); - assertEquals( - numOfShards, - newRemoteWithoutRemoteRecoveryIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) - ) - ); - assertEquals( - 0, - newRemoteWithoutRemoteRecoveryIndexRoutingTable.shardsMatchingPredicateCount( shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource ) ); assertEquals( numOfShards, - newRemoteWithoutRemoteRecoveryIndexRoutingTable.shardsMatchingPredicateCount( + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource ) ); + } } From e12ab0f9ed5cfa32fbbcf654253180a72e50d0d7 Mon Sep 17 00:00:00 2001 From: Dhwanil Patel Date: Fri, 20 Oct 2023 18:18:36 +0530 Subject: [PATCH 053/178] Fix flaky remote cluster state UT (#10780) Signed-off-by: Dhwanil Patel --- .../gateway/remote/RemoteClusterStateServiceTests.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 433eac63e9580..bcc58789dd6fd 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -458,7 +458,7 @@ public void testGlobalMetadataOnlyUpdated() throws IOException { mockBlobStoreObjects(); final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); final ClusterState initialClusterState = ClusterState.builder(ClusterName.DEFAULT) - .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata).version(randomNonNegativeLong())) .build(); final ClusterMetadataManifest initialManifest = ClusterMetadataManifest.builder() .codecVersion(2) @@ -479,6 +479,7 @@ public void testGlobalMetadataOnlyUpdated() throws IOException { // new cluster state where only global metadata is different Metadata newMetadata = Metadata.builder(clusterState.metadata()) .persistentSettings(Settings.builder().put("cluster.blocks.read_only", true).build()) + .version(randomNonNegativeLong()) .build(); ClusterState newClusterState = ClusterState.builder(clusterState).metadata(newMetadata).build(); @@ -1281,7 +1282,7 @@ private static ClusterState.Builder generateClusterStateWithOneIndex() { .version(1L) .stateUUID("state-uuid") .metadata( - Metadata.builder().put(indexMetadata, true).clusterUUID("cluster-uuid").coordinationMetadata(coordinationMetadata).build() + Metadata.builder().version(randomNonNegativeLong()).put(indexMetadata, true).clusterUUID("cluster-uuid").coordinationMetadata(coordinationMetadata).build() ); } From 5093cc71476f5796f3b91fb18ff867cfe37588c0 Mon Sep 17 00:00:00 2001 From: Dhwanil Patel Date: Fri, 20 Oct 2023 19:18:25 +0530 Subject: [PATCH 054/178] Fix spotless failure (#10782) Signed-off-by: Dhwanil Patel --- .../gateway/remote/RemoteClusterStateServiceTests.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java 
index bcc58789dd6fd..5202f31c514ed 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -1282,7 +1282,12 @@ private static ClusterState.Builder generateClusterStateWithOneIndex() { .version(1L) .stateUUID("state-uuid") .metadata( - Metadata.builder().version(randomNonNegativeLong()).put(indexMetadata, true).clusterUUID("cluster-uuid").coordinationMetadata(coordinationMetadata).build() + Metadata.builder() + .version(randomNonNegativeLong()) + .put(indexMetadata, true) + .clusterUUID("cluster-uuid") + .coordinationMetadata(coordinationMetadata) + .build() ); } From e691df09c66dcc1693897543fd7633c4b208ce48 Mon Sep 17 00:00:00 2001 From: rayshrey <121871912+rayshrey@users.noreply.github.com> Date: Fri, 20 Oct 2023 20:36:20 +0530 Subject: [PATCH 055/178] Add tracing instrumentation for indexing paths (#10273) * Add tracing instrumentation for indexing paths Signed-off-by: Shreyansh Ray * Fix failing tests and review changes Signed-off-by: Shreyansh Ray * Fix test failures due to Span not being properly closed Signed-off-by: Shreyansh Ray * Changes to spans in primary and replica actions Signed-off-by: Shreyansh Ray * Review comments fixes and refactoring Signed-off-by: Shreyansh Ray * Precommit auto-changes Signed-off-by: Shreyansh Ray * Add refresh policy as attribute Signed-off-by: Shreyansh Ray * Fix changelog entry Signed-off-by: Shreyansh Ray * Instrument primary/replica write in TransportWriteAction instead of TransportShardBulkAction Signed-off-by: Shreyansh Ray * Modify SpanBuilder Signed-off-by: Shreyansh Ray * spotlessApply and precommit Signed-off-by: Shreyansh Ray * Change span names Signed-off-by: Shreyansh Ray * Pass Noop Tracer instead of injected tracer Signed-off-by: Shreyansh Ray * Reverting previous changes Signed-off-by: Shreyansh Ray * Remove tracer variable from TransportShardBulkAction Signed-off-by: Shreyansh Ray --------- Signed-off-by: Shreyansh Ray --- CHANGELOG.md | 1 + .../action/bulk/TransportBulkAction.java | 110 +++++++++++------- .../action/bulk/TransportShardBulkAction.java | 7 +- .../TransportResyncReplicationAction.java | 7 +- .../replication/TransportWriteAction.java | 24 +++- .../index/seqno/RetentionLeaseSyncAction.java | 7 +- .../telemetry/tracing/AttributeNames.java | 25 ++++ .../telemetry/tracing/SpanBuilder.java | 20 ++++ ...ActionIndicesThatCannotBeCreatedTests.java | 4 +- .../bulk/TransportBulkActionIngestTests.java | 4 +- .../action/bulk/TransportBulkActionTests.java | 3 +- .../bulk/TransportBulkActionTookTests.java | 3 +- .../bulk/TransportShardBulkActionTests.java | 13 ++- ...TransportResyncReplicationActionTests.java | 6 +- ...rtWriteActionForIndexingPressureTests.java | 3 +- .../TransportWriteActionTests.java | 6 +- .../seqno/RetentionLeaseSyncActionTests.java | 12 +- .../snapshots/SnapshotResiliencyTests.java | 9 +- 18 files changed, 192 insertions(+), 72 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5c52c43a35b8d..d7d492679c79d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -113,6 +113,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) - [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642)) - Add telemetry tracer/metric 
enable flag and integ test. ([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395)) +- Add instrumentation for indexing in transport bulk action and transport shard bulk action. ([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java index 726ba7ba119af..4a9b07c12821d 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java @@ -85,6 +85,11 @@ import org.opensearch.ingest.IngestService; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportService; @@ -133,6 +138,7 @@ public class TransportBulkAction extends HandledTransportAction() { - @Override - public void onResponse(BulkShardResponse bulkShardResponse) { - for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) { - // we may have no response if item failed - if (bulkItemResponse.getResponse() != null) { - bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); - } - docStatusStats.inc(bulkItemResponse.status()); - responses.set(bulkItemResponse.getItemId(), bulkItemResponse); - } + final Span span = tracer.startSpan(SpanBuilder.from("bulkShardAction", nodeId, bulkShardRequest)); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + shardBulkAction.execute( + bulkShardRequest, + TraceableActionListener.create(ActionListener.runBefore(new ActionListener() { + @Override + public void onResponse(BulkShardResponse bulkShardResponse) { + for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) { + // we may have no response if item failed + if (bulkItemResponse.getResponse() != null) { + bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); + } - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } + docStatusStats.inc(bulkItemResponse.status()); + responses.set(bulkItemResponse.getItemId(), bulkItemResponse); + } - @Override - public void onFailure(Exception e) { - // create failures for all relevant requests - for (BulkItemRequest request : requests) { - final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); - final DocWriteRequest docWriteRequest = request.request(); - final BulkItemResponse bulkItemResponse = new BulkItemResponse( - request.id(), - docWriteRequest.opType(), - new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e) - ); + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } - docStatusStats.inc(bulkItemResponse.status()); - responses.set(request.id(), bulkItemResponse); - } + @Override + public void onFailure(Exception e) { + // create failures for all relevant requests + for (BulkItemRequest request : requests) { + final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); + final DocWriteRequest docWriteRequest = request.request(); + final BulkItemResponse bulkItemResponse = new BulkItemResponse( + 
request.id(), + docWriteRequest.opType(), + new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e) + ); + + docStatusStats.inc(bulkItemResponse.status()); + responses.set(request.id(), bulkItemResponse); + } - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } - private void finishHim() { - indicesService.addDocStatusStats(docStatusStats); - listener.onResponse( - new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)) - ); - } - }, releasable::close)); + private void finishHim() { + indicesService.addDocStatusStats(docStatusStats); + listener.onResponse( + new BulkResponse( + responses.toArray(new BulkItemResponse[responses.length()]), + buildTookInMillis(startTimeNanos) + ) + ); + } + }, releasable::close), span, tracer) + ); + } catch (Exception e) { + span.setError(e); + span.endSpan(); + throw e; + } } bulkRequest = null; // allow memory for bulk request items to be reclaimed before all items have been completed } diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index fddda0ef1f9a7..268a6ed6f85b8 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -99,6 +99,7 @@ import org.opensearch.indices.SystemIndices; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportChannel; @@ -161,7 +162,8 @@ public TransportShardBulkAction( IndexingPressureService indexingPressureService, SegmentReplicationPressureService segmentReplicationPressureService, RemoteStorePressureService remoteStorePressureService, - SystemIndices systemIndices + SystemIndices systemIndices, + Tracer tracer ) { super( settings, @@ -177,7 +179,8 @@ public TransportShardBulkAction( EXECUTOR_NAME_FUNCTION, false, indexingPressureService, - systemIndices + systemIndices, + tracer ); this.updateHelper = updateHelper; this.mappingUpdatedAction = mappingUpdatedAction; diff --git a/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java index 032fe83e2220b..9d60706d1f100 100644 --- a/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java @@ -54,6 +54,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportException; @@ -93,7 +94,8 @@ public TransportResyncReplicationAction( ShardStateAction shardStateAction, ActionFilters actionFilters, IndexingPressureService indexingPressureService, - SystemIndices systemIndices + SystemIndices systemIndices, + Tracer tracer ) { super( settings, @@ -109,7 +111,8 @@ public TransportResyncReplicationAction( EXECUTOR_NAME_FUNCTION, true, /* we should never reject resync because of thread pool capacity on primary 
*/ indexingPressureService, - systemIndices + systemIndices, + tracer ); } diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java index a0b5299805868..9ebfa8cfd0df8 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java @@ -59,6 +59,11 @@ import org.opensearch.index.translog.Translog.Location; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -82,6 +87,7 @@ public abstract class TransportWriteAction< protected final SystemIndices systemIndices; private final Function executorFunction; + private final Tracer tracer; protected TransportWriteAction( Settings settings, @@ -97,7 +103,8 @@ protected TransportWriteAction( Function executorFunction, boolean forceExecutionOnPrimary, IndexingPressureService indexingPressureService, - SystemIndices systemIndices + SystemIndices systemIndices, + Tracer tracer ) { // We pass ThreadPool.Names.SAME to the super class as we control the dispatching to the // ThreadPool.Names.WRITE/ThreadPool.Names.SYSTEM_WRITE thread pools in this class. @@ -119,6 +126,7 @@ protected TransportWriteAction( this.executorFunction = executorFunction; this.indexingPressureService = indexingPressureService; this.systemIndices = systemIndices; + this.tracer = tracer; } protected String executor(IndexShard shard) { @@ -220,7 +228,12 @@ protected void shardOperationOnPrimary( threadPool.executor(executor).execute(new ActionRunnable>(listener) { @Override protected void doRun() { - dispatchedShardOperationOnPrimary(request, primary, listener); + Span span = tracer.startSpan( + SpanBuilder.from("dispatchedShardOperationOnPrimary", clusterService.localNode().getId(), request) + ); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + dispatchedShardOperationOnPrimary(request, primary, TraceableActionListener.create(listener, span, tracer)); + } } @Override @@ -248,7 +261,12 @@ protected void shardOperationOnReplica(ReplicaRequest request, IndexShard replic threadPool.executor(executorFunction.apply(replica)).execute(new ActionRunnable(listener) { @Override protected void doRun() { - dispatchedShardOperationOnReplica(request, replica, listener); + Span span = tracer.startSpan( + SpanBuilder.from("dispatchedShardOperationOnReplica", clusterService.localNode().getId(), request) + ); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + dispatchedShardOperationOnReplica(request, replica, TraceableActionListener.create(listener, span, tracer)); + } } @Override diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java index f74fc7eefe65c..ca3c7e1d49700 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java @@ -62,6 +62,7 @@ import 
org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportResponseHandler; @@ -99,7 +100,8 @@ public RetentionLeaseSyncAction( final ShardStateAction shardStateAction, final ActionFilters actionFilters, final IndexingPressureService indexingPressureService, - final SystemIndices systemIndices + final SystemIndices systemIndices, + final Tracer tracer ) { super( settings, @@ -115,7 +117,8 @@ public RetentionLeaseSyncAction( ignore -> ThreadPool.Names.MANAGEMENT, false, indexingPressureService, - systemIndices + systemIndices, + tracer ); } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java index a9514c298ef88..b6b2cf360d1c5 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java @@ -69,4 +69,29 @@ private AttributeNames() { * Action Name. */ public static final String TRANSPORT_ACTION = "action"; + + /** + * Index Name + */ + public static final String INDEX = "index"; + + /** + * Shard ID + */ + public static final String SHARD_ID = "shard_id"; + + /** + * Number of request items in bulk request + */ + public static final String BULK_REQUEST_ITEMS = "bulk_request_items"; + + /** + * Node ID + */ + public static final String NODE_ID = "node_id"; + + /** + * Refresh Policy + */ + public static final String REFRESH_POLICY = "refresh_policy"; } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java index d97fbd371ab2a..1dce422943b7a 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java @@ -8,6 +8,8 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.action.bulk.BulkShardRequest; +import org.opensearch.action.support.replication.ReplicatedWriteRequest; import org.opensearch.common.annotation.InternalApi; import org.opensearch.core.common.Strings; import org.opensearch.http.HttpRequest; @@ -68,6 +70,10 @@ public static SpanCreationContext from(String action, Transport.Connection conne return SpanCreationContext.server().name(createSpanName(action, connection)).attributes(buildSpanAttributes(action, connection)); } + public static SpanCreationContext from(String spanName, String nodeId, ReplicatedWriteRequest request) { + return SpanCreationContext.server().name(spanName).attributes(buildSpanAttributes(nodeId, request)); + } + private static String createSpanName(HttpRequest httpRequest) { return httpRequest.method().name() + SEPARATOR + httpRequest.uri(); } @@ -150,4 +156,18 @@ private static Attributes buildSpanAttributes(String action, TcpChannel tcpChann return attributes; } + private static Attributes buildSpanAttributes(String nodeId, ReplicatedWriteRequest request) { + Attributes attributes = Attributes.create() + .addAttribute(AttributeNames.NODE_ID, nodeId) + .addAttribute(AttributeNames.REFRESH_POLICY, request.getRefreshPolicy().getValue()); + if (request.shardId() != null) { + attributes.addAttribute(AttributeNames.INDEX, request.shardId().getIndexName()) + .addAttribute(AttributeNames.SHARD_ID, 
request.shardId().getId()); + } + if (request instanceof BulkShardRequest) { + attributes.addAttribute(AttributeNames.BULK_REQUEST_ITEMS, ((BulkShardRequest) request).items().length); + } + return attributes; + } + } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index 0f67eff26cbde..cf7080ab2fc06 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -54,6 +54,7 @@ import org.opensearch.index.VersionType; import org.opensearch.indices.SystemIndices; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.ThreadPool; @@ -155,7 +156,8 @@ private void indicesThatCannotBeCreatedTestCase( new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null) ), null, - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ) { @Override void executeBulk( diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java index 515f6eae28a34..141c630b94020 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java @@ -70,6 +70,7 @@ import org.opensearch.indices.SystemIndices; import org.opensearch.ingest.IngestService; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.ThreadPool; @@ -172,7 +173,8 @@ class TestTransportBulkAction extends TransportBulkAction { new ClusterService(SETTINGS, new ClusterSettings(SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null) ), null, - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java index 10cad6fb147a2..6bbd740df7f9c 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java @@ -118,7 +118,8 @@ class TestTransportBulkAction extends TransportBulkAction { new AutoCreateIndex(Settings.EMPTY, clusterService.getClusterSettings(), new Resolver(), new SystemIndices(emptyMap())), new IndexingPressureService(Settings.EMPTY, clusterService), mock(IndicesService.class), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java index 852e3837e1e7a..9d5b4430ea395 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java @@ -282,7 +282,8 
@@ static class TestTransportBulkAction extends TransportBulkAction { new IndexingPressureService(Settings.EMPTY, clusterService), null, new SystemIndices(emptyMap()), - relativeTimeProvider + relativeTimeProvider, + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java index fe0fdd07025d9..b325cfa197933 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java @@ -88,6 +88,7 @@ import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; @@ -1074,7 +1075,8 @@ public void testHandlePrimaryTermValidationRequestWithDifferentAllocationId() { mock(IndexingPressureService.class), mock(SegmentReplicationPressureService.class), mock(RemoteStorePressureService.class), - mock(SystemIndices.class) + mock(SystemIndices.class), + NoopTracer.INSTANCE ); action.handlePrimaryTermValidationRequest( new TransportShardBulkAction.PrimaryTermValidationRequest(aId + "-1", 1, shardId), @@ -1105,7 +1107,8 @@ public void testHandlePrimaryTermValidationRequestWithOlderPrimaryTerm() { mock(IndexingPressureService.class), mock(SegmentReplicationPressureService.class), mock(RemoteStorePressureService.class), - mock(SystemIndices.class) + mock(SystemIndices.class), + NoopTracer.INSTANCE ); action.handlePrimaryTermValidationRequest( new TransportShardBulkAction.PrimaryTermValidationRequest(aId, 1, shardId), @@ -1136,7 +1139,8 @@ public void testHandlePrimaryTermValidationRequestSuccess() { mock(IndexingPressureService.class), mock(SegmentReplicationPressureService.class), mock(RemoteStorePressureService.class), - mock(SystemIndices.class) + mock(SystemIndices.class), + NoopTracer.INSTANCE ); action.handlePrimaryTermValidationRequest( new TransportShardBulkAction.PrimaryTermValidationRequest(aId, 1, shardId), @@ -1178,7 +1182,8 @@ private TransportShardBulkAction createAction() { mock(IndexingPressureService.class), mock(SegmentReplicationPressureService.class), mock(RemoteStorePressureService.class), - mock(SystemIndices.class) + mock(SystemIndices.class), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java index 3bd8930064563..da87a0a967f53 100644 --- a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java @@ -203,7 +203,8 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { shardStateAction, new ActionFilters(new HashSet<>()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); assertThat(action.globalBlockLevel(), nullValue()); @@ -256,7 +257,8 @@ private TransportResyncReplicationAction createAction() { mock(ShardStateAction.class), new ActionFilters(new HashSet<>()), mock(IndexingPressureService.class), - new SystemIndices(emptyMap()) + new 
SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); } } diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java index 4a2185d1558f7..7212b1f5efe13 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java @@ -392,7 +392,8 @@ protected TestAction( ignore -> ThreadPool.Names.SAME, false, TransportWriteActionForIndexingPressureTests.this.indexingPressureService, - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java index 9d2069ac16190..b4549f82230bf 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java @@ -477,7 +477,8 @@ protected TestAction(boolean withDocumentFailureOnPrimary, boolean withDocumentF ignore -> ThreadPool.Names.SAME, false, new IndexingPressureService(Settings.EMPTY, TransportWriteActionTests.this.clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); this.withDocumentFailureOnPrimary = withDocumentFailureOnPrimary; this.withDocumentFailureOnReplica = withDocumentFailureOnReplica; @@ -505,7 +506,8 @@ protected TestAction( ignore -> ThreadPool.Names.SAME, false, new IndexingPressureService(settings, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); this.withDocumentFailureOnPrimary = false; this.withDocumentFailureOnReplica = false; diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java index d9bca55a208c2..63a9ac2f2e8ec 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -125,7 +125,8 @@ public void testRetentionLeaseSyncActionOnPrimary() { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); final RetentionLeases retentionLeases = mock(RetentionLeases.class); final RetentionLeaseSyncAction.Request request = new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); @@ -162,7 +163,8 @@ public void testRetentionLeaseSyncActionOnReplica() throws Exception { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); final RetentionLeases retentionLeases = mock(RetentionLeases.class); final RetentionLeaseSyncAction.Request request = new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); @@ -203,7 +205,8 @@ public void testBlocks() { shardStateAction, new ActionFilters(Collections.emptySet()), new 
IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); assertNull(action.indexBlockLevel()); @@ -233,7 +236,8 @@ private RetentionLeaseSyncAction createAction() { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 2f9f38d18a064..710717532ceb4 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -2124,7 +2124,8 @@ public void onFailure(final Exception e) { shardStateAction, actionFilters, new IndexingPressureService(settings, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ) ), new GlobalCheckpointSyncAction( @@ -2190,7 +2191,8 @@ public void onFailure(final Exception e) { mock(ThreadPool.class) ), mock(RemoteStorePressureService.class), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); actions.put( BulkAction.INSTANCE, @@ -2214,7 +2216,8 @@ public void onFailure(final Exception e) { new AutoCreateIndex(settings, clusterSettings, indexNameExpressionResolver, new SystemIndices(emptyMap())), new IndexingPressureService(settings, clusterService), mock(IndicesService.class), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ) ); final RestoreService restoreService = new RestoreService( From a1fde65fe2ad1acda4364cf5fb751e9b494327ce Mon Sep 17 00:00:00 2001 From: Ashish Date: Fri, 20 Oct 2023 21:06:42 +0530 Subject: [PATCH 056/178] [Remote Store] Fix relocation failure due to transport receive timeout (#10761) * [Remote Store] Fix relocation failure due to transport receive timeout Signed-off-by: Ashish Singh * Fix existing extended shardIdle for remote backed shards Signed-off-by: Ashish Singh * Incorporate PR review comments Signed-off-by: Ashish Singh --------- Signed-off-by: Ashish Singh --- .../opensearch/remotestore/RemoteStoreIT.java | 23 +++++++++++++++++++ .../org/opensearch/index/IndexSettings.java | 3 +++ .../opensearch/index/shard/IndexShard.java | 6 ++++- .../translog/InternalTranslogManager.java | 10 ++++---- .../index/translog/RemoteFsTranslog.java | 5 ++++ .../opensearch/index/translog/Translog.java | 4 ++++ .../recovery/PeerRecoverySourceService.java | 3 ++- .../recovery/RemoteRecoveryTargetHandler.java | 13 +++++++++-- .../index/shard/RemoteIndexShardTests.java | 9 ++++++++ .../SegmentReplicationIndexShardTests.java | 12 ++++++---- 10 files changed, 75 insertions(+), 13 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 1fb5c2052aded..b3b4f8e10fd31 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -509,4 +509,27 @@ public void testRestoreSnapshotToIndexWithSameNameDifferentUUID() throws Excepti assertHitCount(client(dataNodes.get(1)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); }); } + + public void 
testNoSearchIdleForAnyReplicaCount() throws ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + String primaryShardNode = internalCluster().startDataOnlyNodes(1).get(0); + + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(INDEX_NAME); + IndexShard indexShard = getIndexShard(primaryShardNode); + assertFalse(indexShard.isSearchIdleSupported()); + + String replicaShardNode = internalCluster().startDataOnlyNodes(1).get(0); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + ensureGreen(INDEX_NAME); + assertFalse(indexShard.isSearchIdleSupported()); + + indexShard = getIndexShard(replicaShardNode); + assertFalse(indexShard.isSearchIdleSupported()); + } } diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index e90e9259f6a5c..99d2b5a74c406 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -1024,6 +1024,9 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti } private void setSearchIdleAfter(TimeValue searchIdleAfter) { + if (this.isRemoteStoreEnabled) { + logger.warn("Search idle is not supported for remote backed indices"); + } if (this.replicationType == ReplicationType.SEGMENT && this.getNumberOfReplicas() > 0) { logger.warn("Search idle is not supported for indices with replicas using 'replication.type: SEGMENT'"); } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 5ebfd3863a6cf..1b7d1b2716979 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -4425,7 +4425,6 @@ public final boolean isSearchIdle() { } /** - * * Returns true if this shard supports search idle. *

* Indices using Segment Replication will ignore search idle unless there are no replicas. @@ -4434,6 +4433,11 @@ public final boolean isSearchIdle() { * a new set of segments. */ public final boolean isSearchIdleSupported() { + // If the index is remote store backed, then search idle is not supported. This is to ensure that async refresh + // task continues to upload to remote store periodically. + if (isRemoteTranslogEnabled()) { + return false; + } return indexSettings.isSegRepEnabled() == false || indexSettings.getNumberOfReplicas() == 0; } diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java index 85c52b907d326..4d0fc13d433c6 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java @@ -430,10 +430,10 @@ public String getTranslogUUID() { * @return if the translog should be flushed */ public boolean shouldPeriodicallyFlush(long localCheckpointOfLastCommit, long flushThreshold) { - final long translogGenerationOfLastCommit = translog.getMinGenerationForSeqNo( - localCheckpointOfLastCommit + 1 - ).translogFileGeneration; - if (translog.sizeInBytesByMinGen(translogGenerationOfLastCommit) < flushThreshold) { + // This is the minimum seqNo that is referred in translog and considered for calculating translog size + long minTranslogRefSeqNo = translog.getMinUnreferencedSeqNoInSegments(localCheckpointOfLastCommit + 1); + final long minReferencedTranslogGeneration = translog.getMinGenerationForSeqNo(minTranslogRefSeqNo).translogFileGeneration; + if (translog.sizeInBytesByMinGen(minReferencedTranslogGeneration) < flushThreshold) { return false; } /* @@ -454,7 +454,7 @@ public boolean shouldPeriodicallyFlush(long localCheckpointOfLastCommit, long fl final long translogGenerationOfNewCommit = translog.getMinGenerationForSeqNo( localCheckpointTrackerSupplier.get().getProcessedCheckpoint() + 1 ).translogFileGeneration; - return translogGenerationOfLastCommit < translogGenerationOfNewCommit + return minReferencedTranslogGeneration < translogGenerationOfNewCommit || localCheckpointTrackerSupplier.get().getProcessedCheckpoint() == localCheckpointTrackerSupplier.get().getMaxSeqNo(); } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 29c825fd383c5..2dd9b1a545d4a 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -544,4 +544,9 @@ public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) thro } } } + + @Override + public long getMinUnreferencedSeqNoInSegments(long minUnrefCheckpointInLastCommit) { + return minSeqNoToKeep; + } } diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index cf7f18840a03e..b44aa6e059224 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -2034,4 +2034,8 @@ public static String createEmptyTranslog( writer.close(); return uuid; } + + public long getMinUnreferencedSeqNoInSegments(long minUnrefCheckpointInLastCommit) { + return minUnrefCheckpointInLastCommit; + } } diff --git 
a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java index 6c7632a8a408d..cb2bedf00de99 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java @@ -376,7 +376,8 @@ private Tuple createRecovery transportService, request.targetNode(), recoverySettings, - throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime) + throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime), + shard.isRemoteTranslogEnabled() ); handler = RecoverySourceHandlerFactory.create(shard, recoveryTarget, request, recoverySettings); return Tuple.tuple(handler, recoveryTarget); diff --git a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java index 66f5b13449f05..37227596fdfe7 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -75,6 +75,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { private final AtomicLong requestSeqNoGenerator = new AtomicLong(0); private final RetryableTransportClient retryableTransportClient; private final RemoteSegmentFileChunkWriter fileChunkWriter; + private final boolean remoteStoreEnabled; public RemoteRecoveryTargetHandler( long recoveryId, @@ -82,7 +83,8 @@ public RemoteRecoveryTargetHandler( TransportService transportService, DiscoveryNode targetNode, RecoverySettings recoverySettings, - Consumer onSourceThrottle + Consumer onSourceThrottle, + boolean remoteStoreEnabled ) { this.transportService = transportService; // It is safe to pass the retry timeout value here because RemoteRecoveryTargetHandler @@ -111,6 +113,7 @@ public RemoteRecoveryTargetHandler( requestSeqNoGenerator, onSourceThrottle ); + this.remoteStoreEnabled = remoteStoreEnabled; } public DiscoveryNode targetNode() { @@ -129,7 +132,13 @@ public void prepareForTranslogOperations(int totalTranslogOps, ActionListener reader = in -> TransportResponse.Empty.INSTANCE; final ActionListener responseListener = ActionListener.map(listener, r -> null); - retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); + if (remoteStoreEnabled) { + // If remote store is enabled, during the prepare_translog phase, translog is also downloaded on the + // target host along with incremental segments download. + retryableTransportClient.executeRetryableAction(action, request, translogOpsRequestOptions, responseListener, reader); + } else { + retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); + } } @Override diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java index fe389e3b3fcb4..703a7d457d5b6 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -471,6 +471,15 @@ public void onReplicationFailure( } } + @Override + protected void validateShardIdleWithNoReplicas(IndexShard primary) { + // ensure search idle conditions are met. 
+ assertFalse(primary.isSearchIdleSupported()); + assertTrue(primary.isSearchIdle()); + assertTrue(primary.scheduledRefresh()); + assertFalse(primary.hasRefreshPending()); + } + private void assertSingleSegmentFile(IndexShard shard, String fileName) throws IOException { final Set segmentsFileNames = Arrays.stream(shard.store().directory().listAll()) .filter(file -> file.startsWith(IndexFileNames.SEGMENTS)) diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index eab38bfe5c64d..7caff3e5f5479 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -436,13 +436,17 @@ public void testShardIdleWithNoReplicas() throws Exception { shards.startAll(); final IndexShard primary = shards.getPrimary(); shards.indexDocs(randomIntBetween(1, 10)); - // ensure search idle conditions are met. - assertTrue(primary.isSearchIdle()); - assertFalse(primary.scheduledRefresh()); - assertTrue(primary.hasRefreshPending()); + validateShardIdleWithNoReplicas(primary); } } + protected void validateShardIdleWithNoReplicas(IndexShard primary) { + // ensure search idle conditions are met. + assertTrue(primary.isSearchIdle()); + assertFalse(primary.scheduledRefresh()); + assertTrue(primary.hasRefreshPending()); + } + /** * here we are starting a new primary shard in PrimaryMode and testing if the shard publishes checkpoint after refresh. */ From ffe9371b38f98a2c9314bb6cfed4f8f224ff085c Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 20 Oct 2023 12:40:04 -0400 Subject: [PATCH 057/178] Update Github pull request template to have a task for inspecting failing checks (#10751) * Update Github pull request template to have a task for inspecting failing checks Signed-off-by: Andriy Redko * Add failing builds troubleshooting section to CONTRIBUTING.md Signed-off-by: Andriy Redko * Address review comments Signed-off-by: Andriy Redko --------- Signed-off-by: Andriy Redko --- .github/pull_request_template.md | 1 + CONTRIBUTING.md | 12 ++++++++++++ 2 files changed, 13 insertions(+) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index c47b9e0b69256..908a032bf833e 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -17,6 +17,7 @@ Resolves #[Issue number to be closed when this PR is merged] - [ ] All tests pass - [ ] New functionality has been documented. 
- [ ] New functionality has javadoc added +- [ ] Failing checks are inspected and point to the corresponding known issue(s) (See: [Troubleshooting Failing Builds](../blob/main/CONTRIBUTING.md#troubleshooting-failing-builds)) - [ ] Commits are signed per the DCO using --signoff - [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) - [ ] Public documentation issue/PR [created](https://github.com/opensearch-project/documentation-website/issues/new/choose) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d379d78829318..4a1162cf2558b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,6 +8,7 @@ - [Developer Certificate of Origin](#developer-certificate-of-origin) - [Changelog](#changelog) - [Review Process](#review-process) + - [Troubleshooting Failing Builds](#troubleshooting-failing-builds) # Contributing to OpenSearch @@ -162,3 +163,14 @@ During the PR process, expect that there will be some back-and-forth. Please try If we accept the PR, a [maintainer](MAINTAINERS.md) will merge your change and usually take care of backporting it to appropriate branches ourselves. If we reject the PR, we will close the pull request with a comment explaining why. This decision isn't always final: if you feel we have misunderstood your intended change or otherwise think that we should reconsider then please continue the conversation with a comment on the PR and we'll do our best to address any further points you raise. + +## Troubleshooting Failing Builds + +The OpenSearch testing framework offers many capabilities but exhibits significant complexity (it does lot of randomization internally to cover as many edge cases and variations as possible). Unfortunately, this posses a challenge by making it harder to discover important issues/bugs in straightforward way and may lead to so called flaky tests - the tests which flip randomly from success to failure without any code changes. + +If your pull request reports a failing test(s) on one of the checks, please: + - look if there is an existing [issue](https://github.com/opensearch-project/OpenSearch/issues) reported for the test in question + - if not, please make sure this is not caused by your changes, run the failing test(s) locally for some time + - if you are sure the failure is not related, please open a new [bug](https://github.com/opensearch-project/OpenSearch/issues/new?assignees=&labels=bug%2C+untriaged&projects=&template=bug_template.md&title=%5BBUG%5D) with `flaky-test` label + - add a comment referencing the issue(s) or bug report(s) to your pull request explaining the failing build(s) + - as a bonus point, try to contribute by fixing the flaky test(s) From 1e28738b8c966011bf1ae1f00431f0377761cb0a Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Fri, 20 Oct 2023 12:17:51 -0500 Subject: [PATCH 058/178] Increase remote recovery thread pool size (#10750) The remote recovery thread pool does blocking I/O when downloading files, so the "half processor count max 10" was definitely too small. This can be shown by triggering recoveries on a node that is also doing segment replication, and the replication lag will increase due to contention on that thread pool. Some amount of contention is inevitable, but the change here to increase the download thread pool, and also limit the concurrent usage of that thread pool by any single recovery/replication to 25% of the threads does help. 
Long term, we can improve this even further by moving to fully async I/O to avoid blocking threads in the application on draining InputStreams. Signed-off-by: Andrew Ross --- .../org/opensearch/indices/recovery/RecoverySettings.java | 7 ++++--- .../main/java/org/opensearch/threadpool/ThreadPool.java | 7 ++++++- .../org/opensearch/threadpool/ScalingThreadPoolTests.java | 2 +- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java index 44dfb2f4cb00a..0f3025369833d 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java @@ -41,6 +41,7 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; @@ -87,10 +88,10 @@ public class RecoverySettings { /** * Controls the maximum number of streams that can be started concurrently per recovery when downloading from the remote store. */ - public static final Setting INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING = Setting.intSetting( + public static final Setting INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING = new Setting<>( "indices.recovery.max_concurrent_remote_store_streams", - 10, - 1, + (s) -> Integer.toString(Math.max(1, OpenSearchExecutors.allocatedProcessors(s) / 2)), + (s) -> Setting.parseInt(s, 1, "indices.recovery.max_concurrent_remote_store_streams"), Property.Dynamic, Property.NodeScope ); diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index fab7620292dd2..5f10986239300 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -273,7 +273,12 @@ public ThreadPool( ); builders.put( Names.REMOTE_RECOVERY, - new ScalingExecutorBuilder(Names.REMOTE_RECOVERY, 1, halfProcMaxAt10, TimeValue.timeValueMinutes(5)) + new ScalingExecutorBuilder( + Names.REMOTE_RECOVERY, + 1, + twiceAllocatedProcessors(allocatedProcessors), + TimeValue.timeValueMinutes(5) + ) ); if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { builders.put( diff --git a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java index ba2d4b8c247bb..19271bbf30e80 100644 --- a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java @@ -154,7 +154,7 @@ private int expectedSize(final String threadPoolName, final int numberOfProcesso sizes.put(ThreadPool.Names.TRANSLOG_SYNC, n -> 4 * n); sizes.put(ThreadPool.Names.REMOTE_PURGE, ThreadPool::halfAllocatedProcessorsMaxFive); sizes.put(ThreadPool.Names.REMOTE_REFRESH_RETRY, ThreadPool::halfAllocatedProcessorsMaxTen); - sizes.put(ThreadPool.Names.REMOTE_RECOVERY, ThreadPool::halfAllocatedProcessorsMaxTen); + sizes.put(ThreadPool.Names.REMOTE_RECOVERY, ThreadPool::twiceAllocatedProcessors); return sizes.get(threadPoolName).apply(numberOfProcessors); } From 4f8bcff554fe3b019d4149ddcdcc634120c67dd3 
Mon Sep 17 00:00:00 2001 From: Poojita Raj Date: Fri, 20 Oct 2023 15:57:17 -0700 Subject: [PATCH 059/178] Return unformatted Segment Replication metrics that take upload time into account for replication lag (#10723) * Return unformatted segrep metrics in stats Signed-off-by: Poojita Raj * Take upload time into account for replication time lag Signed-off-by: Poojita Raj * unformat segrep stats Signed-off-by: Poojita Raj * remove unused field names Signed-off-by: Poojita Raj --------- Signed-off-by: Poojita Raj --- .../main/java/org/opensearch/index/ReplicationStats.java | 8 +++----- .../main/java/org/opensearch/index/shard/IndexShard.java | 2 +- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/ReplicationStats.java b/server/src/main/java/org/opensearch/index/ReplicationStats.java index 9cc6685c75f80..0ae4526365bf1 100644 --- a/server/src/main/java/org/opensearch/index/ReplicationStats.java +++ b/server/src/main/java/org/opensearch/index/ReplicationStats.java @@ -8,11 +8,9 @@ package org.opensearch.index; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -76,9 +74,9 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.SEGMENT_REPLICATION); - builder.field(Fields.MAX_BYTES_BEHIND, new ByteSizeValue(maxBytesBehind).toString()); - builder.field(Fields.TOTAL_BYTES_BEHIND, new ByteSizeValue(totalBytesBehind).toString()); - builder.field(Fields.MAX_REPLICATION_LAG, new TimeValue(maxReplicationLag)); + builder.field(Fields.MAX_BYTES_BEHIND, maxBytesBehind); + builder.field(Fields.TOTAL_BYTES_BEHIND, totalBytesBehind); + builder.field(Fields.MAX_REPLICATION_LAG, maxReplicationLag); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 1b7d1b2716979..f990a3b56e856 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -3010,7 +3010,7 @@ public ReplicationStats getReplicationStats() { long maxBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).max().orElse(0L); long totalBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).sum(); long maxReplicationLag = stats.stream() - .mapToLong(SegmentReplicationShardStats::getCurrentReplicationTimeMillis) + .mapToLong(SegmentReplicationShardStats::getCurrentReplicationLagMillis) .max() .orElse(0L); return new ReplicationStats(maxBytesBehind, totalBytesBehind, maxReplicationLag); From 1e9ec52dd9d5e7d4a3ffb7d37c8b7fdf4069c26d Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Fri, 20 Oct 2023 16:10:31 -0700 Subject: [PATCH 060/178] [Segment Replication] Fix Flaky test SegmentReplicationRelocationIT.testPrimaryRelocation (#10701) * Add primary mode check before assserting on primary mode. Signed-off-by: Rishikesh1159 * remove unnecessary shardRouting check. 
Signed-off-by: Rishikesh1159 * Add test logging. Signed-off-by: Rishikesh1159 * Addressing comments on PR. Signed-off-by: Rishikesh1159 --------- Signed-off-by: Rishikesh1159 --- .../replication/SegmentReplicationRelocationIT.java | 2 ++ .../replication/SegmentReplicationSourceHandler.java | 9 --------- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java index dd832a63d1e66..dbe0b43441f54 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java @@ -26,6 +26,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; @@ -55,6 +56,7 @@ private void createIndex(int replicaCount) { * This test verifies happy path when primary shard is relocated newly added node (target) in the cluster. Before * relocation and after relocation documents are indexed and documents are verified */ + @TestLogging(reason = "Getting trace logs from replication,shard and allocation package", value = "org.opensearch.indices.replication:TRACE, org.opensearch.index.shard:TRACE, org.opensearch.cluster.routing.allocation:TRACE") public void testPrimaryRelocation() throws Exception { final String oldPrimary = internalCluster().startNode(); createIndex(1); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java index e2c47b0fb3159..674c09311c645 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java @@ -12,8 +12,6 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.StepListener; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.routing.IndexShardRoutingTable; -import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.ListenableFuture; @@ -22,7 +20,6 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.StoreFileMetadata; -import org.opensearch.indices.recovery.DelayRecoveryException; import org.opensearch.indices.recovery.FileChunkWriter; import org.opensearch.indices.recovery.MultiChunkTransfer; import org.opensearch.indices.replication.common.CopyState; @@ -146,12 +143,6 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene ); }; cancellableThreads.checkForCancel(); - final IndexShardRoutingTable routingTable = shard.getReplicationGroup().getRoutingTable(); - ShardRouting targetShardRouting = routingTable.getByAllocationId(request.getTargetAllocationId()); - if (targetShardRouting == null) { - logger.debug("delaying replication of {} as it is not listed as assigned to target node {}", 
shard.shardId(), targetNode); - throw new DelayRecoveryException("source node does not have the shard listed in its state as allocated on the node"); - } final StepListener sendFileStep = new StepListener<>(); Set storeFiles = new HashSet<>(Arrays.asList(shard.store().directory().listAll())); From 51626d03f857ca840280d1e57cfb1bdfbba75e2d Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Fri, 20 Oct 2023 17:13:43 -0700 Subject: [PATCH 061/178] [Segment Replication] Add Segment Replication backpressure rejection stats to _nodes/stats (#10656) * Initial WIP for adding segrep backpressure to node stats. Signed-off-by: Rishikesh1159 * Bind SegmentReplicarionStatsTracker in Node.java Signed-off-by: Rishikesh1159 * remove additional segrep backpressure info from node stats Signed-off-by: Rishikesh1159 * fix metric name in node stats Signed-off-by: Rishikesh1159 * Fix compile error. Signed-off-by: Rishikesh1159 * Fix compile errors. Signed-off-by: Rishikesh1159 * Address comments on PR. Signed-off-by: Rishikesh1159 * Update java docs. Signed-off-by: Rishikesh1159 * Address comments on PR and fix compile errors. Signed-off-by: Rishikesh1159 * Address comments on PR. Signed-off-by: Rishikesh1159 * Update unit test. Signed-off-by: Rishikesh1159 --------- Signed-off-by: Rishikesh1159 Signed-off-by: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> --- .../admin/cluster/node/stats/NodeStats.java | 24 +++++++ .../cluster/node/stats/NodesStatsRequest.java | 1 + .../node/stats/TransportNodesStatsAction.java | 1 + .../stats/TransportClusterStatsAction.java | 1 + .../SegmentReplicationPressureService.java | 3 +- .../SegmentReplicationRejectionStats.java | 67 +++++++++++++++++++ .../index/SegmentReplicationStatsTracker.java | 8 +++ .../main/java/org/opensearch/node/Node.java | 4 ++ .../java/org/opensearch/node/NodeService.java | 7 ++ .../cluster/node/stats/NodeStatsTests.java | 18 +++++ .../opensearch/cluster/DiskUsageTests.java | 6 ++ ...egmentReplicationPressureServiceTests.java | 9 ++- .../SegmentReplicationStatsTrackerTests.java | 35 ++++++++++ .../snapshots/SnapshotResiliencyTests.java | 2 + .../MockInternalClusterInfoService.java | 1 + .../opensearch/test/InternalTestCluster.java | 1 + 16 files changed, 186 insertions(+), 2 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java create mode 100644 server/src/test/java/org/opensearch/index/SegmentReplicationStatsTrackerTests.java diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index e9bfa358103c8..3d37056956c69 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -46,6 +46,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.discovery.DiscoveryStats; import org.opensearch.http.HttpStats; +import org.opensearch.index.SegmentReplicationRejectionStats; import org.opensearch.index.stats.IndexingPressureStats; import org.opensearch.index.stats.ShardIndexingPressureStats; import org.opensearch.index.store.remote.filecache.FileCacheStats; @@ -129,6 +130,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private SearchBackpressureStats searchBackpressureStats; + @Nullable + private 
SegmentReplicationRejectionStats segmentReplicationRejectionStats; + @Nullable private ClusterManagerThrottlingStats clusterManagerThrottlingStats; @@ -211,6 +215,12 @@ public NodeStats(StreamInput in) throws IOException { } else { resourceUsageStats = null; } + // TODO: change to V_2_12_0 on main after backport to 2.x + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + segmentReplicationRejectionStats = in.readOptionalWriteable(SegmentReplicationRejectionStats::new); + } else { + segmentReplicationRejectionStats = null; + } if (in.getVersion().onOrAfter(Version.V_2_12_0)) { repositoriesStats = in.readOptionalWriteable(RepositoriesStats::new); } else { @@ -244,6 +254,7 @@ public NodeStats( @Nullable FileCacheStats fileCacheStats, @Nullable TaskCancellationStats taskCancellationStats, @Nullable SearchPipelineStats searchPipelineStats, + @Nullable SegmentReplicationRejectionStats segmentReplicationRejectionStats, @Nullable RepositoriesStats repositoriesStats ) { super(node); @@ -271,6 +282,7 @@ public NodeStats( this.fileCacheStats = fileCacheStats; this.taskCancellationStats = taskCancellationStats; this.searchPipelineStats = searchPipelineStats; + this.segmentReplicationRejectionStats = segmentReplicationRejectionStats; this.repositoriesStats = repositoriesStats; } @@ -415,6 +427,10 @@ public SearchPipelineStats getSearchPipelineStats() { } @Nullable + public SegmentReplicationRejectionStats getSegmentReplicationRejectionStats() { + return segmentReplicationRejectionStats; + } + public RepositoriesStats getRepositoriesStats() { return repositoriesStats; } @@ -465,6 +481,10 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(resourceUsageStats); } + // TODO: change to V_2_12_0 on main after backport to 2.x + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalWriteable(segmentReplicationRejectionStats); + } if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(repositoriesStats); } @@ -561,6 +581,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (getResourceUsageStats() != null) { getResourceUsageStats().toXContent(builder, params); } + if (getSegmentReplicationRejectionStats() != null) { + getSegmentReplicationRejectionStats().toXContent(builder, params); + } + if (getRepositoriesStats() != null) { getRepositoriesStats().toXContent(builder, params); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 88dff20354aa2..fc72668d36413 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -215,6 +215,7 @@ public enum Metric { TASK_CANCELLATION("task_cancellation"), SEARCH_PIPELINE("search_pipeline"), RESOURCE_USAGE_STATS("resource_usage_stats"), + SEGMENT_REPLICATION_BACKPRESSURE("segment_replication_backpressure"), REPOSITORIES("repositories"); private String metricName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index aa02f8e580f4a..99cf42cfdc4d0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -126,6 +126,7 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { NodesStatsRequest.Metric.TASK_CANCELLATION.containedIn(metrics), NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics), NodesStatsRequest.Metric.RESOURCE_USAGE_STATS.containedIn(metrics), + NodesStatsRequest.Metric.SEGMENT_REPLICATION_BACKPRESSURE.containedIn(metrics), NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index f51fabbfb2388..5efec8b876435 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -170,6 +170,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq false, false, false, + false, false ); List shardsStats = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java index 4284daf9ffef4..d9d480e7b2b27 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java @@ -106,10 +106,11 @@ public SegmentReplicationPressureService( ClusterService clusterService, IndicesService indicesService, ShardStateAction shardStateAction, + SegmentReplicationStatsTracker tracker, ThreadPool threadPool ) { this.indicesService = indicesService; - this.tracker = new SegmentReplicationStatsTracker(this.indicesService); + this.tracker = tracker; this.shardStateAction = shardStateAction; this.threadPool = threadPool; diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java b/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java new file mode 100644 index 0000000000000..9f9f150ebe2d7 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.opensearch.Version; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Segment replication rejection stats. 
+ * + * @opensearch.internal + */ +public class SegmentReplicationRejectionStats implements Writeable, ToXContentFragment { + + /** + * Total rejections due to segment replication backpressure + */ + private long totalRejectionCount; + + public SegmentReplicationRejectionStats(final long totalRejectionCount) { + this.totalRejectionCount = totalRejectionCount; + } + + public SegmentReplicationRejectionStats(StreamInput in) throws IOException { + // TODO: change to V_2_12_0 on main after backport to 2.x + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + this.totalRejectionCount = in.readVLong(); + } + } + + public long getTotalRejectionCount() { + return totalRejectionCount; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("segment_replication_backpressure"); + builder.field("total_rejected_requests", totalRejectionCount); + return builder.endObject(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + // TODO: change to V_2_12_0 on main after backport to 2.x + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeVLong(totalRejectionCount); + } + } + + @Override + public String toString() { + return "SegmentReplicationRejectionStats{ totalRejectedRequestCount=" + totalRejectionCount + '}'; + } + +} diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java b/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java index 6d5c00c08caff..f5fc8aa1c1eea 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java @@ -33,6 +33,14 @@ public SegmentReplicationStatsTracker(IndicesService indicesService) { rejectionCount = ConcurrentCollections.newConcurrentMap(); } + public SegmentReplicationRejectionStats getTotalRejectionStats() { + return new SegmentReplicationRejectionStats(this.rejectionCount.values().stream().mapToInt(AtomicInteger::get).sum()); + } + + protected Map getRejectionCount() { + return rejectionCount; + } + public SegmentReplicationStats getStats() { Map stats = new HashMap<>(); for (IndexService indexService : indicesService) { diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index c9148f382a028..711a90d424ac3 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -136,6 +136,7 @@ import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.SegmentReplicationStatsTracker; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.engine.EngineFactory; import org.opensearch.index.recovery.RemoteStoreRestoreService; @@ -977,6 +978,7 @@ protected Node( transportService.getTaskManager() ); + final SegmentReplicationStatsTracker segmentReplicationStatsTracker = new SegmentReplicationStatsTracker(indicesService); RepositoriesModule repositoriesModule = new RepositoriesModule( this.environment, pluginsService.filterPlugins(RepositoryPlugin.class), @@ -1116,6 +1118,7 @@ protected Node( fileCache, taskCancellationMonitoringService, resourceUsageCollectorService, + segmentReplicationStatsTracker, repositoryService ); @@ -1246,6 +1249,7 @@ protected Node( b.bind(MetricsRegistry.class).toInstance(metricsRegistry); 
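Note: the new stats object above follows the standard Writeable/ToXContentFragment pattern. A minimal round-trip sketch, illustrative only (the class, getter, and version gate are taken from this patch; BytesStreamOutput and the exact import paths are assumptions from the surrounding codebase):

    import org.opensearch.Version;
    import org.opensearch.common.io.stream.BytesStreamOutput;
    import org.opensearch.core.common.io.stream.StreamInput;
    import org.opensearch.index.SegmentReplicationRejectionStats;

    public class RejectionStatsRoundTrip {
        public static void main(String[] args) throws Exception {
            SegmentReplicationRejectionStats stats = new SegmentReplicationRejectionStats(3L);
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                out.setVersion(Version.V_3_0_0); // writer gates the field on this version
                stats.writeTo(out);
                StreamInput in = out.bytes().streamInput();
                in.setVersion(Version.V_3_0_0); // reader must use the same version so the layouts agree
                assert new SegmentReplicationRejectionStats(in).getTotalRejectionCount() == 3L;
            }
        }
    }

In the node stats response, the toXContent fragment above renders as "segment_replication_backpressure": { "total_rejected_requests": 3 }.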
b.bind(RemoteClusterStateService.class).toProvider(() -> remoteClusterStateService); b.bind(PersistedStateRegistry.class).toInstance(persistedStateRegistry); + b.bind(SegmentReplicationStatsTracker.class).toInstance(segmentReplicationStatsTracker); }); injector = modules.createInjector(); diff --git a/server/src/main/java/org/opensearch/node/NodeService.java b/server/src/main/java/org/opensearch/node/NodeService.java index e2d7bc2c86ba3..49dde0b81cac7 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -48,6 +48,7 @@ import org.opensearch.discovery.Discovery; import org.opensearch.http.HttpServerTransport; import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.SegmentReplicationStatsTracker; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.indices.IndicesService; import org.opensearch.ingest.IngestService; @@ -96,6 +97,8 @@ public class NodeService implements Closeable { private final TaskCancellationMonitoringService taskCancellationMonitoringService; private final RepositoriesService repositoriesService; + private final SegmentReplicationStatsTracker segmentReplicationStatsTracker; + NodeService( Settings settings, ThreadPool threadPool, @@ -119,6 +122,7 @@ public class NodeService implements Closeable { FileCache fileCache, TaskCancellationMonitoringService taskCancellationMonitoringService, ResourceUsageCollectorService resourceUsageCollectorService, + SegmentReplicationStatsTracker segmentReplicationStatsTracker, RepositoriesService repositoriesService ) { this.settings = settings; @@ -146,6 +150,7 @@ public class NodeService implements Closeable { this.repositoriesService = repositoriesService; clusterService.addStateApplier(ingestService); clusterService.addStateApplier(searchPipelineService); + this.segmentReplicationStatsTracker = segmentReplicationStatsTracker; } public NodeInfo info( @@ -226,6 +231,7 @@ public NodeStats stats( boolean taskCancellation, boolean searchPipelineStats, boolean resourceUsageStats, + boolean segmentReplicationTrackerStats, boolean repositoriesStats ) { // for indices stats we want to include previous allocated shards stats as well (it will @@ -256,6 +262,7 @@ public NodeStats stats( fileCacheStats && fileCache != null ? fileCache.fileCacheStats() : null, taskCancellation ? this.taskCancellationMonitoringService.stats() : null, searchPipelineStats ? this.searchPipelineService.stats() : null, + segmentReplicationTrackerStats ? this.segmentReplicationStatsTracker.getTotalRejectionStats() : null, repositoriesStats ? 
this.repositoriesService.getRepositoriesStats() : null ); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index 3491f18da9550..ebdd012006fb2 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -49,6 +49,7 @@ import org.opensearch.discovery.DiscoveryStats; import org.opensearch.http.HttpStats; import org.opensearch.index.ReplicationStats; +import org.opensearch.index.SegmentReplicationRejectionStats; import org.opensearch.index.remote.RemoteSegmentStats; import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.RemoteTranslogStats; @@ -417,6 +418,17 @@ public void testSerialization() throws IOException { assertEquals(aResourceUsageStats.getTimestamp(), bResourceUsageStats.getTimestamp()); }); } + SegmentReplicationRejectionStats segmentReplicationRejectionStats = nodeStats.getSegmentReplicationRejectionStats(); + SegmentReplicationRejectionStats deserializedSegmentReplicationRejectionStats = deserializedNodeStats + .getSegmentReplicationRejectionStats(); + if (segmentReplicationRejectionStats == null) { + assertNull(deserializedSegmentReplicationRejectionStats); + } else { + assertEquals( + segmentReplicationRejectionStats.getTotalRejectionCount(), + deserializedSegmentReplicationRejectionStats.getTotalRejectionCount() + ); + } ScriptCacheStats scriptCacheStats = nodeStats.getScriptCacheStats(); ScriptCacheStats deserializedScriptCacheStats = deserializedNodeStats.getScriptCacheStats(); if (scriptCacheStats == null) { @@ -812,6 +824,11 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { } nodesResourceUsageStats = new NodesResourceUsageStats(resourceUsageStatsMap); } + SegmentReplicationRejectionStats segmentReplicationRejectionStats = null; + if (frequently()) { + segmentReplicationRejectionStats = new SegmentReplicationRejectionStats(randomNonNegativeLong()); + } + ClusterManagerThrottlingStats clusterManagerThrottlingStats = null; if (frequently()) { clusterManagerThrottlingStats = new ClusterManagerThrottlingStats(); @@ -853,6 +870,7 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { null, null, null, + segmentReplicationRejectionStats, null ); } diff --git a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java index 6f03e87bf5824..f037b75dc16a3 100644 --- a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java @@ -192,6 +192,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -220,6 +221,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -248,6 +250,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ) ); @@ -307,6 +310,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -335,6 +339,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -363,6 +368,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ) ); diff --git a/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java 
b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java index 34fa13f0ba62c..478fdcb24f76a 100644 --- a/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java @@ -278,6 +278,13 @@ private SegmentReplicationPressureService buildPressureService(Settings settings ClusterService clusterService = mock(ClusterService.class); when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - return new SegmentReplicationPressureService(settings, clusterService, indicesService, shardStateAction, mock(ThreadPool.class)); + return new SegmentReplicationPressureService( + settings, + clusterService, + indicesService, + shardStateAction, + new SegmentReplicationStatsTracker(indicesService), + mock(ThreadPool.class) + ); } } diff --git a/server/src/test/java/org/opensearch/index/SegmentReplicationStatsTrackerTests.java b/server/src/test/java/org/opensearch/index/SegmentReplicationStatsTrackerTests.java new file mode 100644 index 0000000000000..04423d583e8f9 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/SegmentReplicationStatsTrackerTests.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.IndicesService; +import org.opensearch.test.OpenSearchTestCase; + +import org.mockito.Mockito; + +import static org.mockito.Mockito.mock; + +public class SegmentReplicationStatsTrackerTests extends OpenSearchTestCase { + + private IndicesService indicesService = mock(IndicesService.class); + + public void testRejectedCount() { + SegmentReplicationStatsTracker segmentReplicationStatsTracker = new SegmentReplicationStatsTracker(indicesService); + + // Verify that total rejection count is 0 on an empty rejectionCount map in statsTracker. + assertTrue(segmentReplicationStatsTracker.getRejectionCount().isEmpty()); + assertEquals(segmentReplicationStatsTracker.getTotalRejectionStats().getTotalRejectionCount(), 0L); + + // Verify that total rejection count is 1 after incrementing rejectionCount. 
+ segmentReplicationStatsTracker.incrementRejectionCount(Mockito.mock(ShardId.class)); + assertEquals(segmentReplicationStatsTracker.getTotalRejectionStats().getTotalRejectionCount(), 1L); + } + +} diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 710717532ceb4..b7a2baacba611 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -178,6 +178,7 @@ import org.opensearch.gateway.TransportNodesListGatewayStartedShards; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.SegmentReplicationPressureService; +import org.opensearch.index.SegmentReplicationStatsTracker; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.remote.RemoteStorePressureService; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; @@ -2188,6 +2189,7 @@ public void onFailure(final Exception e) { clusterService, mock(IndicesService.class), mock(ShardStateAction.class), + mock(SegmentReplicationStatsTracker.class), mock(ThreadPool.class) ), mock(RemoteStorePressureService.class), diff --git a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java index 60a54110fd0b4..2ba4de5e54a67 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java @@ -122,6 +122,7 @@ List adjustNodesStats(List nodesStats) { nodeStats.getFileCacheStats(), nodeStats.getTaskCancellationStats(), nodeStats.getSearchPipelineStats(), + nodeStats.getSegmentReplicationRejectionStats(), nodeStats.getRepositoriesStats() ); }).collect(Collectors.toList()); diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 898e125b94954..63d8f069bebea 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -2722,6 +2722,7 @@ public void ensureEstimatedStats() { false, false, false, + false, false ); assertThat( From 7c5a806d5bbee77c0c4a184a500bf5522a8d8cd7 Mon Sep 17 00:00:00 2001 From: Movva Ajaykumar Date: Sat, 21 Oct 2023 13:31:09 +0530 Subject: [PATCH 062/178] Added changes for AdmissionControl Interceptor and AdmissionControlService for RateLimiting (#9286) * Changes for AdmissionControl Interceptor and AdmissionControlService for RateLimiting (#9286) Signed-off-by: Ajay Kumar Movva --- CHANGELOG.md | 1 + .../TransportReplicationAction.java | 10 +- .../common/network/NetworkModule.java | 16 +- .../common/settings/ClusterSettings.java | 8 +- .../main/java/org/opensearch/node/Node.java | 19 +- .../AdmissionControlService.java | 104 +++++++++ .../AdmissionControlSettings.java | 83 ++++++++ .../controllers/AdmissionController.java | 70 ++++++ .../CPUBasedAdmissionController.java | 55 +++++ .../controllers/package-info.java | 12 ++ .../enums/AdmissionControlMode.java | 66 ++++++ .../enums/TransportActionType.java | 45 ++++ .../admissioncontrol/enums/package-info.java | 12 ++ .../admissioncontrol/package-info.java | 12 ++ .../CPUBasedAdmissionControllerSettings.java | 110 ++++++++++ 
.../settings/package-info.java | 11 + .../AdmissionControlTransportHandler.java | 65 ++++++ .../AdmissionControlTransportInterceptor.java | 40 ++++ .../transport/package-info.java | 11 + .../ratelimitting/package-info.java | 12 ++ .../common/network/NetworkModuleTests.java | 199 +++++++++++++++++- .../AdmissionControlServiceTests.java | 140 ++++++++++++ .../AdmissionControlSettingsTests.java | 103 +++++++++ .../CPUBasedAdmissionControllerTests.java | 109 ++++++++++ .../enums/AdmissionControlModeTests.java | 29 +++ .../enums/TransportActionTypeTests.java | 27 +++ ...CPUBasedAdmissionControlSettingsTests.java | 153 ++++++++++++++ ...AdmissionControlTransportHandlerTests.java | 92 ++++++++ 28 files changed, 1595 insertions(+), 19 deletions(-) create mode 100644 server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java create mode 100644 server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettings.java create mode 100644 server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java create mode 100644 server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionController.java create mode 100644 server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/package-info.java create mode 100644 server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlMode.java create mode 100644 server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionType.java create mode 100644 server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/package-info.java create mode 100644 server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/package-info.java create mode 100644 server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettings.java create mode 100644 server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/package-info.java create mode 100644 server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandler.java create mode 100644 server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportInterceptor.java create mode 100644 server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/package-info.java create mode 100644 server/src/main/java/org/opensearch/ratelimitting/package-info.java create mode 100644 server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java create mode 100644 server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java create mode 100644 server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionControllerTests.java create mode 100644 server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlModeTests.java create mode 100644 server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionTypeTests.java create mode 100644 server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControlSettingsTests.java create mode 100644 server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandlerTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index d7d492679c79d..374dd4ab57ee6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 
+18,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote cluster state] Upload global metadata in cluster state to remote store([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) - [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535)) - [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +- [AdmissionControl] Added changes for AdmissionControl Interceptor and AdmissionControlService for RateLimiting ([#9286](https://github.com/opensearch-project/OpenSearch/pull/9286)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index b68bd13cfed80..ddebdc5530e70 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -134,6 +134,12 @@ public abstract class TransportReplicationAction< Setting.Property.NodeScope ); + /** + * Making primary and replica actions suffixes as constant + */ + public static final String PRIMARY_ACTION_SUFFIX = "[p]"; + public static final String REPLICA_ACTION_SUFFIX = "[r]"; + protected final ThreadPool threadPool; protected final TransportService transportService; protected final ClusterService clusterService; @@ -204,8 +210,8 @@ protected TransportReplicationAction( this.shardStateAction = shardStateAction; this.executor = executor; - this.transportPrimaryAction = actionName + "[p]"; - this.transportReplicaAction = actionName + "[r]"; + this.transportPrimaryAction = actionName + PRIMARY_ACTION_SUFFIX; + this.transportReplicaAction = actionName + REPLICA_ACTION_SUFFIX; this.initialRetryBackoffBound = REPLICATION_INITIAL_RETRY_BACKOFF_BOUND.get(settings); this.retryTimeout = REPLICATION_RETRY_TIMEOUT.get(settings); diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java index 0734659d8ee72..821d48fccf48c 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java @@ -131,7 +131,7 @@ public final class NetworkModule { private final Map> transportFactories = new HashMap<>(); private final Map> transportHttpFactories = new HashMap<>(); - private final List transportIntercetors = new ArrayList<>(); + private final List transportInterceptors = new ArrayList<>(); /** * Creates a network module that custom networking classes can be plugged into. 
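The interceptor plumbing touched in this file ultimately wraps every registered transport handler. A conceptual sketch of how the composite interceptor chains wrappers (simplified; method and variable names here are illustrative, not the actual NetworkModule code):

    // Each interceptor wraps the handler produced so far, so the
    // last-registered interceptor ends up outermost on the receive path.
    static <T extends TransportRequest> TransportRequestHandler<T> chain(
        List<TransportInterceptor> interceptors,
        String action,
        String executor,
        boolean forceExecution,
        TransportRequestHandler<T> actualHandler
    ) {
        TransportRequestHandler<T> handler = actualHandler;
        for (TransportInterceptor interceptor : interceptors) {
            handler = interceptor.interceptHandler(action, executor, forceExecution, handler);
        }
        return handler;
    }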
@@ -149,9 +149,13 @@ public NetworkModule( NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, ClusterSettings clusterSettings, - Tracer tracer + Tracer tracer, + List transportInterceptors ) { this.settings = settings; + if (transportInterceptors != null) { + transportInterceptors.forEach(this::registerTransportInterceptor); + } for (NetworkPlugin plugin : plugins) { Map> httpTransportFactory = plugin.getHttpTransports( settings, @@ -180,11 +184,11 @@ public NetworkModule( for (Map.Entry> entry : transportFactory.entrySet()) { registerTransport(entry.getKey(), entry.getValue()); } - List transportInterceptors = plugin.getTransportInterceptors( + List pluginTransportInterceptors = plugin.getTransportInterceptors( namedWriteableRegistry, threadPool.getThreadContext() ); - for (TransportInterceptor interceptor : transportInterceptors) { + for (TransportInterceptor interceptor : pluginTransportInterceptors) { registerTransportInterceptor(interceptor); } } @@ -264,7 +268,7 @@ public Supplier getTransportSupplier() { * Registers a new {@link TransportInterceptor} */ private void registerTransportInterceptor(TransportInterceptor interceptor) { - this.transportIntercetors.add(Objects.requireNonNull(interceptor, "interceptor must not be null")); + this.transportInterceptors.add(Objects.requireNonNull(interceptor, "interceptor must not be null")); } /** @@ -272,7 +276,7 @@ private void registerTransportInterceptor(TransportInterceptor interceptor) { * @see #registerTransportInterceptor(TransportInterceptor) */ public TransportInterceptor getTransportInterceptor() { - return new CompositeTransportInterceptor(this.transportIntercetors); + return new CompositeTransportInterceptor(this.transportInterceptors); } static final class CompositeTransportInterceptor implements TransportInterceptor { diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 76883c200542e..7ac7da819b215 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -134,6 +134,8 @@ import org.opensearch.persistent.PersistentTasksClusterService; import org.opensearch.persistent.decider.EnableAssignmentDecider; import org.opensearch.plugins.PluginsService; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings; +import org.opensearch.ratelimitting.admissioncontrol.settings.CPUBasedAdmissionControllerSettings; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.rest.BaseRestHandler; import org.opensearch.script.ScriptService; @@ -682,7 +684,11 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING, RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, - IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING + IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, + AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, + CPUBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT, + CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT ) ) ); diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 
711a90d424ac3..e80b768074fc7 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -197,6 +197,8 @@ import org.opensearch.plugins.SearchPlugin; import org.opensearch.plugins.SystemIndexPlugin; import org.opensearch.plugins.TelemetryPlugin; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; +import org.opensearch.ratelimitting.admissioncontrol.transport.AdmissionControlTransportInterceptor; import org.opensearch.repositories.RepositoriesModule; import org.opensearch.repositories.RepositoriesService; import org.opensearch.rest.RestController; @@ -820,6 +822,7 @@ protected Node( remoteStoreStatsTrackerFactory, recoverySettings ); + final AliasValidator aliasValidator = new AliasValidator(); final ShardLimitValidator shardLimitValidator = new ShardLimitValidator(settings, clusterService, systemIndices); @@ -891,6 +894,17 @@ protected Node( final RestController restController = actionModule.getRestController(); + final AdmissionControlService admissionControlService = new AdmissionControlService( + settings, + clusterService.getClusterSettings(), + threadPool + ); + + AdmissionControlTransportInterceptor admissionControlTransportInterceptor = new AdmissionControlTransportInterceptor( + admissionControlService + ); + + List transportInterceptors = List.of(admissionControlTransportInterceptor); final NetworkModule networkModule = new NetworkModule( settings, pluginsService.filterPlugins(NetworkPlugin.class), @@ -903,8 +917,10 @@ protected Node( networkService, restController, clusterService.getClusterSettings(), - tracer + tracer, + transportInterceptors ); + Collection>> indexTemplateMetadataUpgraders = pluginsService.filterPlugins( Plugin.class ).stream().map(Plugin::getIndexTemplateMetadataUpgrader).collect(Collectors.toList()); @@ -1181,6 +1197,7 @@ protected Node( b.bind(IndexingPressureService.class).toInstance(indexingPressureService); b.bind(TaskResourceTrackingService.class).toInstance(taskResourceTrackingService); b.bind(SearchBackpressureService.class).toInstance(searchBackpressureService); + b.bind(AdmissionControlService.class).toInstance(admissionControlService); b.bind(UsageService.class).toInstance(usageService); b.bind(AggregationUsageService.class).toInstance(searchModule.getValuesSourceRegistry().getUsageService()); b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry); diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java new file mode 100644 index 0000000000000..2cc409b0e4465 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.ratelimitting.admissioncontrol;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController;
+import org.opensearch.ratelimitting.admissioncontrol.controllers.CPUBasedAdmissionController;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import static org.opensearch.ratelimitting.admissioncontrol.settings.CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER;
+
+/**
+ * Admission control service that bootstraps and manages all the admission controllers in OpenSearch.
+ */
+public class AdmissionControlService {
+    private final ThreadPool threadPool;
+    public final AdmissionControlSettings admissionControlSettings;
+    private final ConcurrentMap<String, AdmissionController> admissionControllers;
+    private static final Logger logger = LogManager.getLogger(AdmissionControlService.class);
+    private final ClusterSettings clusterSettings;
+    private final Settings settings;
+
+    /**
+     *
+     * @param settings Immutable settings instance
+     * @param clusterSettings ClusterSettings instance
+     * @param threadPool ThreadPool instance
+     */
+    public AdmissionControlService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) {
+        this.threadPool = threadPool;
+        this.admissionControlSettings = new AdmissionControlSettings(clusterSettings, settings);
+        this.admissionControllers = new ConcurrentHashMap<>();
+        this.clusterSettings = clusterSettings;
+        this.settings = settings;
+        this.initialise();
+    }
+
+    /**
+     * Initialise and register all the admission controllers
+     */
+    private void initialise() {
+        // Initialise the different types of admission controllers
+        registerAdmissionController(CPU_BASED_ADMISSION_CONTROLLER);
+    }
+
+    /**
+     * Trigger all the registered admission controllers for the given transport action
+     */
+    public void applyTransportAdmissionControl(String action) {
+        this.admissionControllers.forEach((name, admissionController) -> { admissionController.apply(action); });
+    }
+
+    /**
+     *
+     * @param admissionControllerName name of the admission controller to register with the service.
+     */
+    public void registerAdmissionController(String admissionControllerName) {
+        AdmissionController admissionController = this.controllerFactory(admissionControllerName);
+        this.admissionControllers.put(admissionControllerName, admissionController);
+    }
+
+    /**
+     * @return AdmissionController instance for the given name
+     */
+    private AdmissionController controllerFactory(String admissionControllerName) {
+        switch (admissionControllerName) {
+            case CPU_BASED_ADMISSION_CONTROLLER:
+                return new CPUBasedAdmissionController(admissionControllerName, this.settings, this.clusterSettings);
+            default:
+                throw new IllegalArgumentException("Not Supported AdmissionController : " + admissionControllerName);
+        }
+    }
+
+    /**
+     *
+     * @return list of the registered admission controllers
+     */
+    public List<AdmissionController> getAdmissionControllers() {
+        return new ArrayList<>(this.admissionControllers.values());
+    }
+
+    /**
+     *
+     * @param controllerName name of the admission controller
+     * @return instance of the requested AdmissionController, or null if it is not registered
+     */
+    public AdmissionController getAdmissionController(String controllerName) {
+        return this.admissionControllers.getOrDefault(controllerName, null);
+    }
+}
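A hedged usage sketch of the service defined above. The settings, clusterSettings, and threadPool instances are assumed to come from node bootstrap (which is how Node.java wires it later in this patch), the action string is a shard-level bulk action name built with the primary-action suffix introduced earlier, and CPU_BASED_ADMISSION_CONTROLLER (value "global_cpu_usage") is defined further down in this change:

    AdmissionControlService admissionControlService = new AdmissionControlService(settings, clusterSettings, threadPool);

    // invoked by the transport interceptor for every inbound request
    admissionControlService.applyTransportAdmissionControl("indices:data/write/bulk[s][p]");

    // inspect the rejection metric of the CPU based controller
    AdmissionController cpuController = admissionControlService.getAdmissionController(CPU_BASED_ADMISSION_CONTROLLER);
    long rejections = cpuController.getRejectionCount();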
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettings.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettings.java
new file mode 100644
index 0000000000000..b557190ab54ac
--- /dev/null
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettings.java
@@ -0,0 +1,83 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ratelimitting.admissioncontrol;
+
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode;
+
+/**
+ * Settings related to admission control.
+ * @opensearch.internal
+ */
+public final class AdmissionControlSettings {
+
+    /**
+     * Default parameters for the AdmissionControlSettings
+     */
+    public static class Defaults {
+        public static final String MODE = "disabled";
+    }
+
+    /**
+     * Feature level setting to operate in shadow-mode or in enforced-mode. If enforced mode is set,
+     * rejection will be performed; otherwise only rejection metrics will be populated.
+     */
+    public static final Setting<AdmissionControlMode> ADMISSION_CONTROL_TRANSPORT_LAYER_MODE = new Setting<>(
+        "admission_control.transport.mode",
+        Defaults.MODE,
+        AdmissionControlMode::fromName,
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+
+    private volatile AdmissionControlMode transportLayerAdmissionControlMode;
+
+    /**
+     * @param clusterSettings clusterSettings instance
+     * @param settings settings instance
+     */
+    public AdmissionControlSettings(ClusterSettings clusterSettings, Settings settings) {
+        this.transportLayerAdmissionControlMode = ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.get(settings);
+        clusterSettings.addSettingsUpdateConsumer(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, this::setAdmissionControlTransportLayerMode);
+    }
+
+    /**
+     *
+     * @param admissionControlMode updated mode of the admission control feature
+     */
+    private void setAdmissionControlTransportLayerMode(AdmissionControlMode admissionControlMode) {
+        this.transportLayerAdmissionControlMode = admissionControlMode;
+    }
+
+    /**
+     *
+     * @return the currently configured mode of the admission control feature
+     */
+    public AdmissionControlMode getAdmissionControlTransportLayerMode() {
+        return this.transportLayerAdmissionControlMode;
+    }
+
+    /**
+     *
+     * @return true if the admission control feature is in enforced mode, else false
+     */
+    public Boolean isTransportLayerAdmissionControlEnforced() {
+        return this.transportLayerAdmissionControlMode == AdmissionControlMode.ENFORCED;
+    }
+
+    /**
+     *
+     * @return true if the admission control feature is enabled, else false
+     */
+    public Boolean isTransportLayerAdmissionControlEnabled() {
+        return this.transportLayerAdmissionControlMode != AdmissionControlMode.DISABLED;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java
new file mode 100644
index 0000000000000..00564a9967f31
--- /dev/null
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java
@@ -0,0 +1,70 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ratelimitting.admissioncontrol.controllers;
+
+import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Abstract class for Admission Controller in OpenSearch, which aims to provide resource based request admission control.
+ * It provides methods for any tracking-object that can be incremented (such as memory size), + * and admission control can be applied if configured limit has been reached + */ +public abstract class AdmissionController { + + private final AtomicLong rejectionCount; + private final String admissionControllerName; + + /** + * + * @param rejectionCount initialised rejectionCount value for AdmissionController + * @param admissionControllerName name of the admissionController + */ + public AdmissionController(AtomicLong rejectionCount, String admissionControllerName) { + this.rejectionCount = rejectionCount; + this.admissionControllerName = admissionControllerName; + } + + /** + * Return the current state of the admission controller + * @return true if admissionController is enabled for the transport layer else false + */ + public boolean isEnabledForTransportLayer(AdmissionControlMode admissionControlMode) { + return admissionControlMode != AdmissionControlMode.DISABLED; + } + + /** + * Increment the tracking-objects and apply the admission control if threshold is breached. + * Mostly applicable while applying admission controller + */ + public abstract void apply(String action); + + /** + * @return name of the admission-controller + */ + public String getName() { + return this.admissionControllerName; + } + + /** + * Adds the rejection count for the controller. Primarily used when copying controller states. + * @param count To add the value of the tracking resource object as the provided count + */ + public void addRejectionCount(long count) { + this.rejectionCount.addAndGet(count); + } + + /** + * @return current value of the rejection count metric tracked by the admission-controller. + */ + public long getRejectionCount() { + return this.rejectionCount.get(); + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionController.java new file mode 100644 index 0000000000000..3a8956b2cce87 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionController.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.controllers; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.settings.CPUBasedAdmissionControllerSettings; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Class for CPU Based Admission Controller in OpenSearch, which aims to provide CPU utilisation admission control. 
+ * It provides methods to apply admission control if the configured limit has been reached
+ */
+public class CPUBasedAdmissionController extends AdmissionController {
+    private static final Logger LOGGER = LogManager.getLogger(CPUBasedAdmissionController.class);
+    public CPUBasedAdmissionControllerSettings settings;
+
+    /**
+     *
+     * @param admissionControllerName name of the admission controller
+     */
+    public CPUBasedAdmissionController(String admissionControllerName, Settings settings, ClusterSettings clusterSettings) {
+        super(new AtomicLong(0), admissionControllerName);
+        this.settings = new CPUBasedAdmissionControllerSettings(clusterSettings, settings);
+    }
+
+    /**
+     * This method takes care of applying admission control based on CPU usage
+     * @param action the transport action name
+     */
+    @Override
+    public void apply(String action) {
+        // TODO: will extend this logic further; currently just incrementing rejectionCount
+        if (this.isEnabledForTransportLayer(this.settings.getTransportLayerAdmissionControllerMode())) {
+            this.applyForTransportLayer(action);
+        }
+    }
+
+    private void applyForTransportLayer(String actionName) {
+        // currently incrementing the count only to verify that the controller triggers as expected (used in tests), so it is limited to 10
+        // TODO: will update the rejection logic further in follow-up PRs
+        if (this.getRejectionCount() < 10) {
+            this.addRejectionCount(1);
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/package-info.java
new file mode 100644
index 0000000000000..23746cc61a203
--- /dev/null
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * This package contains classes related to the different admission controllers
+ */
+package org.opensearch.ratelimitting.admissioncontrol.controllers;
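The abstract contract above is small: a constructor seeding the rejection counter and a single apply hook. A minimal hypothetical subclass (illustrative only, not part of this PR; assumes the same package as the controllers above):

    import java.util.concurrent.atomic.AtomicLong;

    public class NoopAdmissionController extends AdmissionController {

        public NoopAdmissionController(String admissionControllerName) {
            super(new AtomicLong(0), admissionControllerName);
        }

        @Override
        public void apply(String action) {
            // a real controller would evaluate resource usage here and, when the
            // configured limit is breached, count the rejection (monitor mode)
            // or throw to reject the request (enforced mode)
        }
    }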
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlMode.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlMode.java
new file mode 100644
index 0000000000000..2ae2436ba84e7
--- /dev/null
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlMode.java
@@ -0,0 +1,66 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ratelimitting.admissioncontrol.enums;
+
+import java.util.Locale;
+
+/**
+ * Defines the AdmissionControlMode
+ */
+public enum AdmissionControlMode {
+    /**
+     * AdmissionController is completely disabled.
+     */
+    DISABLED("disabled"),
+
+    /**
+     * AdmissionController only monitors the rejection criteria for the requests.
+     */
+    MONITOR("monitor_only"),
+
+    /**
+     * AdmissionController monitors and rejects tasks that exceed resource usage thresholds.
+     */
+    ENFORCED("enforced");
+
+    private final String mode;
+
+    /**
+     * @param mode mode of the admission controller
+     */
+    AdmissionControlMode(String mode) {
+        this.mode = mode;
+    }
+
+    /**
+     *
+     * @return mode of the admission controller
+     */
+    public String getMode() {
+        return this.mode;
+    }
+
+    /**
+     *
+     * @param name name of the mode to resolve
+     * @return the AdmissionControlMode enum value for the given name
+     */
+    public static AdmissionControlMode fromName(String name) {
+        switch (name.toLowerCase(Locale.ROOT)) {
+            case "disabled":
+                return DISABLED;
+            case "monitor_only":
+                return MONITOR;
+            case "enforced":
+                return ENFORCED;
+            default:
+                throw new IllegalArgumentException("Invalid AdmissionControlMode: " + name);
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionType.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionType.java
new file mode 100644
index 0000000000000..f2fdca0cfe49b
--- /dev/null
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionType.java
@@ -0,0 +1,45 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ratelimitting.admissioncontrol.enums;
+
+import java.util.Locale;
+
+/**
+ * Enum that defines the types of transport requests
+ */
+public enum TransportActionType {
+    INDEXING("indexing"),
+    SEARCH("search");
+
+    private final String type;
+
+    TransportActionType(String type) {
+        this.type = type;
+    }
+
+    /**
+     *
+     * @return type of the request
+     */
+    public String getType() {
+        return type;
+    }
+
+    public static TransportActionType fromName(String name) {
+        name = name.toLowerCase(Locale.ROOT);
+        switch (name) {
+            case "indexing":
+                return INDEXING;
+            case "search":
+                return SEARCH;
+            default:
+                throw new IllegalArgumentException("Not Supported TransportAction Type: " + name);
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/package-info.java
new file mode 100644
index 0000000000000..98b08ebd0a7bf
--- /dev/null
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * This package contains enums related to the admission controller feature
+ */
+package org.opensearch.ratelimitting.admissioncontrol.enums;
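Both enums resolve names case-insensitively through fromName and throw IllegalArgumentException for unknown values, so a quick sanity check (illustrative; run with assertions enabled):

    assert AdmissionControlMode.fromName("Monitor_Only") == AdmissionControlMode.MONITOR;
    assert TransportActionType.fromName("SEARCH") == TransportActionType.SEARCH;
    // AdmissionControlMode.fromName("bogus") -> IllegalArgumentException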
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/package-info.java
new file mode 100644
index 0000000000000..b3dc229f86fb6
--- /dev/null
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * This package contains base classes needed for the admission control feature
+ */
+package org.opensearch.ratelimitting.admissioncontrol;
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettings.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettings.java
new file mode 100644
index 0000000000000..141e9b68db145
--- /dev/null
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettings.java
@@ -0,0 +1,110 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ratelimitting.admissioncontrol.settings;
+
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings;
+import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Settings related to the CPU based admission controller.
+ * @opensearch.internal
+ */
+public class CPUBasedAdmissionControllerSettings {
+    public static final String CPU_BASED_ADMISSION_CONTROLLER = "global_cpu_usage";
+
+    /**
+     * Default parameters for the CPUBasedAdmissionControllerSettings
+     */
+    public static class Defaults {
+        public static final long CPU_USAGE = 95;
+        public static final List<String> TRANSPORT_LAYER_DEFAULT_URI_TYPE = Arrays.asList("indexing", "search");
+    }
+
+    private AdmissionControlMode transportLayerMode;
+    private Long searchCPULimit;
+    private Long indexingCPULimit;
+
+    private final List<String> transportActionsList;
+    /**
+     * Feature level setting to operate in shadow-mode or in enforced-mode. If enforced mode is set,
+     * rejection will be performed; otherwise only rejection metrics will be populated.
+     */
+    public static final Setting<AdmissionControlMode> CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE = new Setting<>(
+        "admission_control.transport.cpu_usage.mode_override",
+        AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE,
+        AdmissionControlMode::fromName,
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+
+    /**
+     * This setting is used to set the CPU usage limit for search requests; by default it uses the default CPU usage limit
+     */
+    public static final Setting<Long> SEARCH_CPU_USAGE_LIMIT = Setting.longSetting(
+        "admission_control.search.cpu_usage.limit",
+        Defaults.CPU_USAGE,
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+
+    /**
+     * This setting is used to set the CPU usage limit for indexing requests; by default it uses the default CPU usage limit
+     */
+    public static final Setting<Long> INDEXING_CPU_USAGE_LIMIT = Setting.longSetting(
+        "admission_control.indexing.cpu_usage.limit",
+        Defaults.CPU_USAGE,
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+
+    // currently limited to a small set of settings; more settings will be added in follow-up PRs
+    public CPUBasedAdmissionControllerSettings(ClusterSettings clusterSettings, Settings settings) {
+        this.transportLayerMode = CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.get(settings);
+        clusterSettings.addSettingsUpdateConsumer(CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, this::setTransportLayerMode);
+        this.searchCPULimit = SEARCH_CPU_USAGE_LIMIT.get(settings);
+        this.indexingCPULimit = INDEXING_CPU_USAGE_LIMIT.get(settings);
+        this.transportActionsList = Defaults.TRANSPORT_LAYER_DEFAULT_URI_TYPE;
+        clusterSettings.addSettingsUpdateConsumer(INDEXING_CPU_USAGE_LIMIT, this::setIndexingCPULimit);
+        clusterSettings.addSettingsUpdateConsumer(SEARCH_CPU_USAGE_LIMIT, this::setSearchCPULimit);
+    }
+
+    private void setTransportLayerMode(AdmissionControlMode admissionControlMode) {
+        this.transportLayerMode = admissionControlMode;
+    }
+
+    public AdmissionControlMode getTransportLayerAdmissionControllerMode() {
+        return transportLayerMode;
+    }
+
+    public Long getSearchCPULimit() {
+        return searchCPULimit;
+    }
+
+    public Long getIndexingCPULimit() {
+        return indexingCPULimit;
+    }
+
+    public void setIndexingCPULimit(Long indexingCPULimit) {
+        this.indexingCPULimit = indexingCPULimit;
+    }
+
+    public void setSearchCPULimit(Long searchCPULimit) {
+        this.searchCPULimit = searchCPULimit;
+    }
+
+    public List<String> getTransportActionsList() {
+        return transportActionsList;
+    }
+}
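The three settings above are node-scoped and dynamic. A hedged configuration sketch (setting keys are from this class and from AdmissionControlSettings; the values, the client handle, and the choice of transient settings are illustrative assumptions):

    // static node-level configuration
    Settings nodeSettings = Settings.builder()
        .put("admission_control.transport.cpu_usage.mode_override", "monitor_only")
        .put("admission_control.search.cpu_usage.limit", 90L)
        .put("admission_control.indexing.cpu_usage.limit", 85L)
        .build();

    // because the settings are registered as Dynamic, they can also be flipped at runtime
    ClusterUpdateSettingsRequest update = new ClusterUpdateSettingsRequest();
    update.transientSettings(Settings.builder().put("admission_control.transport.mode", "enforced"));
    client.admin().cluster().updateSettings(update).actionGet();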
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/package-info.java
new file mode 100644
index 0000000000000..a024ccc756745
--- /dev/null
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/package-info.java
@@ -0,0 +1,11 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+/**
+ * This package contains settings related classes for the different admission controllers
+ */
+package org.opensearch.ratelimitting.admissioncontrol.settings;
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandler.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandler.java
new file mode 100644
index 0000000000000..7d0f5fbc17a51
--- /dev/null
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandler.java
@@ -0,0 +1,66 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ratelimitting.admissioncontrol.transport;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
+import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService;
+import org.opensearch.tasks.Task;
+import org.opensearch.transport.TransportChannel;
+import org.opensearch.transport.TransportRequest;
+import org.opensearch.transport.TransportRequestHandler;
+
+/**
+ * AdmissionControl Handler to intercept Transport Requests.
+ * @param <T> Transport Request type
+ */
+public class AdmissionControlTransportHandler<T extends TransportRequest> implements TransportRequestHandler<T> {
+
+    private final String action;
+    private final TransportRequestHandler<T> actualHandler;
+    protected final Logger log = LogManager.getLogger(this.getClass());
+    AdmissionControlService admissionControlService;
+    boolean forceExecution;
+
+    public AdmissionControlTransportHandler(
+        String action,
+        TransportRequestHandler<T> actualHandler,
+        AdmissionControlService admissionControlService,
+        boolean forceExecution
+    ) {
+        super();
+        this.action = action;
+        this.actualHandler = actualHandler;
+        this.admissionControlService = admissionControlService;
+        this.forceExecution = forceExecution;
+    }
+
+    /**
+     * @param request Transport Request that landed on the node
+     * @param channel Transport channel that allows sending a response to the request
+     * @param task Current task that is executing
+     * @throws Exception if the wrapped handler throws an exception while processing the request
+     */
+    @Override
+    public void messageReceived(T request, TransportChannel channel, Task task) throws Exception {
+        // intercept all the transport requests here and apply admission control
+        try {
+            // TODO: evaluate whether admission control should be skipped when forceExecution is true; will update in next PR.
+            this.admissionControlService.applyTransportAdmissionControl(this.action);
+        } catch (final OpenSearchRejectedExecutionException openSearchRejectedExecutionException) {
+            log.warn(openSearchRejectedExecutionException.getMessage());
+            channel.sendResponse(openSearchRejectedExecutionException);
+            return; // do not invoke the actual handler once the request has been rejected
+        } catch (final Exception e) {
+            throw e;
+        }
+        actualHandler.messageReceived(request, channel, task);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportInterceptor.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportInterceptor.java
new file mode 100644
index 0000000000000..01cfcbd780006
--- /dev/null
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportInterceptor.java
@@ -0,0 +1,40 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ratelimitting.admissioncontrol.transport;
+
+import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService;
+import org.opensearch.transport.TransportInterceptor;
+import org.opensearch.transport.TransportRequest;
+import org.opensearch.transport.TransportRequestHandler;
+
+/**
+ * This class allows throttling to intercept requests on both the sender and the receiver side.
+ */
+public class AdmissionControlTransportInterceptor implements TransportInterceptor {
+
+    AdmissionControlService admissionControlService;
+
+    public AdmissionControlTransportInterceptor(AdmissionControlService admissionControlService) {
+        this.admissionControlService = admissionControlService;
+    }
+
+    /**
+     *
+     * @return admission control handler to intercept transport requests
+     */
+    @Override
+    public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(
+        String action,
+        String executor,
+        boolean forceExecution,
+        TransportRequestHandler<T> actualHandler
+    ) {
+        return new AdmissionControlTransportHandler<>(action, actualHandler, this.admissionControlService, forceExecution);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/package-info.java
new file mode 100644
index 0000000000000..f97f31bc7b1db
--- /dev/null
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/package-info.java
@@ -0,0 +1,11 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+/**
+ * This package contains transport related classes for the admission control feature
+ */
+package org.opensearch.ratelimitting.admissioncontrol.transport;
diff --git a/server/src/main/java/org/opensearch/ratelimitting/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/package-info.java
new file mode 100644
index 0000000000000..c04358e14284f
--- /dev/null
+++ b/server/src/main/java/org/opensearch/ratelimitting/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +/** + * Base OpenSearch Throttling package + */ +package org.opensearch.ratelimitting; diff --git a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java index 0ca118fe422a5..ab51cafb039c2 100644 --- a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java @@ -57,6 +57,7 @@ import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -124,7 +125,7 @@ public Map> getTransports( return Collections.singletonMap("custom", custom); } }; - NetworkModule module = newNetworkModule(settings, plugin); + NetworkModule module = newNetworkModule(settings, null, plugin); assertSame(custom, module.getTransportSupplier()); } @@ -135,7 +136,7 @@ public void testRegisterHttpTransport() { .build(); Supplier custom = FakeHttpTransport::new; - NetworkModule module = newNetworkModule(settings, new NetworkPlugin() { + NetworkModule module = newNetworkModule(settings, null, new NetworkPlugin() { @Override public Map> getHttpTransports( Settings settings, @@ -155,7 +156,7 @@ public Map> getHttpTransports( assertSame(custom, module.getHttpServerTransportSupplier()); settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); - NetworkModule newModule = newNetworkModule(settings); + NetworkModule newModule = newNetworkModule(settings, null); expectThrows(IllegalStateException.class, () -> newModule.getHttpServerTransportSupplier()); } @@ -169,7 +170,7 @@ public void testOverrideDefault() { Supplier customTransport = () -> null; // content doesn't matter we check reference equality Supplier custom = FakeHttpTransport::new; Supplier def = FakeHttpTransport::new; - NetworkModule module = newNetworkModule(settings, new NetworkPlugin() { + NetworkModule module = newNetworkModule(settings, null, new NetworkPlugin() { @Override public Map> getTransports( Settings settings, @@ -214,7 +215,7 @@ public void testDefaultKeys() { Supplier custom = FakeHttpTransport::new; Supplier def = FakeHttpTransport::new; Supplier customTransport = () -> null; - NetworkModule module = newNetworkModule(settings, new NetworkPlugin() { + NetworkModule module = newNetworkModule(settings, null, new NetworkPlugin() { @Override public Map> getTransports( Settings settings, @@ -273,7 +274,7 @@ public TransportRequestHandler interceptHandler( return actualHandler; } }; - NetworkModule module = newNetworkModule(settings, new NetworkPlugin() { + NetworkModule module = newNetworkModule(settings, null, new NetworkPlugin() { @Override public List getTransportInterceptors( NamedWriteableRegistry namedWriteableRegistry, @@ -295,7 +296,7 @@ public List getTransportInterceptors( assertSame(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.get(0), interceptor); NullPointerException nullPointerException = expectThrows(NullPointerException.class, () -> { - newNetworkModule(settings, new NetworkPlugin() { + newNetworkModule(settings, null, new NetworkPlugin() { @Override public List getTransportInterceptors( NamedWriteableRegistry namedWriteableRegistry, @@ -309,7 +310,186 @@ public List getTransportInterceptors( assertEquals("interceptor must not be null", nullPointerException.getMessage()); } - private NetworkModule 
newNetworkModule(Settings settings, NetworkPlugin... plugins) { + public void testRegisterCoreInterceptor() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); + AtomicInteger called = new AtomicInteger(0); + + TransportInterceptor interceptor = new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler actualHandler + ) { + called.incrementAndGet(); + if ("foo/bar/boom".equals(action)) { + assertTrue(forceExecution); + } else { + assertFalse(forceExecution); + } + return actualHandler; + } + }; + + List coreTransportInterceptors = new ArrayList<>(); + coreTransportInterceptors.add(interceptor); + + NetworkModule module = newNetworkModule(settings, coreTransportInterceptors); + + TransportInterceptor transportInterceptor = module.getTransportInterceptor(); + assertEquals(0, called.get()); + transportInterceptor.interceptHandler("foo/bar/boom", null, true, null); + assertEquals(1, called.get()); + transportInterceptor.interceptHandler("foo/baz/boom", null, false, null); + assertEquals(2, called.get()); + assertTrue(transportInterceptor instanceof NetworkModule.CompositeTransportInterceptor); + assertEquals(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.size(), 1); + assertSame(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.get(0), interceptor); + } + + public void testInterceptorOrder() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); + AtomicInteger called = new AtomicInteger(0); + AtomicInteger called1 = new AtomicInteger(0); + + TransportInterceptor interceptor = new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler actualHandler + ) { + called.incrementAndGet(); + if ("foo/bar/boom".equals(action)) { + assertTrue(forceExecution); + } else { + assertFalse(forceExecution); + } + return actualHandler; + } + }; + + TransportInterceptor interceptor1 = new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler actualHandler + ) { + called1.incrementAndGet(); + if ("foo/bar/boom".equals(action)) { + assertTrue(forceExecution); + } else { + assertFalse(forceExecution); + } + return actualHandler; + } + }; + + List coreTransportInterceptors = new ArrayList<>(); + coreTransportInterceptors.add(interceptor1); + + NetworkModule module = newNetworkModule(settings, coreTransportInterceptors, new NetworkPlugin() { + @Override + public List getTransportInterceptors( + NamedWriteableRegistry namedWriteableRegistry, + ThreadContext threadContext + ) { + assertNotNull(threadContext); + return Collections.singletonList(interceptor); + } + }); + + TransportInterceptor transportInterceptor = module.getTransportInterceptor(); + assertEquals(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.size(), 2); + + assertEquals(0, called.get()); + assertEquals(0, called1.get()); + transportInterceptor.interceptHandler("foo/bar/boom", null, true, null); + assertEquals(1, called.get()); + assertEquals(1, called1.get()); + transportInterceptor.interceptHandler("foo/baz/boom", null, false, null); + assertEquals(2, called.get()); + 
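        // At this point each interceptor in the composite should have been invoked twice:
        // once for the force-executed action and once for the regular one.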
assertEquals(2, called1.get()); + } + + public void testInterceptorOrderException() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); + AtomicInteger called = new AtomicInteger(0); + AtomicInteger called1 = new AtomicInteger(0); + + TransportInterceptor interceptor = new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler actualHandler + ) { + called.incrementAndGet(); + if ("foo/bar/boom".equals(action)) { + assertTrue(forceExecution); + } else { + assertFalse(forceExecution); + } + return actualHandler; + } + }; + + TransportInterceptor interceptor1 = new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler actualHandler + ) { + called1.incrementAndGet(); + throw new RuntimeException("Handler Invoke Failed"); + } + }; + + List coreTransportInterceptors = new ArrayList<>(); + coreTransportInterceptors.add(interceptor1); + + NetworkModule module = newNetworkModule(settings, coreTransportInterceptors, new NetworkPlugin() { + @Override + public List getTransportInterceptors( + NamedWriteableRegistry namedWriteableRegistry, + ThreadContext threadContext + ) { + assertNotNull(threadContext); + return Collections.singletonList(interceptor); + } + }); + + TransportInterceptor transportInterceptor = module.getTransportInterceptor(); + assertEquals(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.size(), 2); + + assertEquals(0, called.get()); + assertEquals(0, called1.get()); + try { + transportInterceptor.interceptHandler("foo/bar/boom", null, true, null); + } catch (Exception e) { + assertEquals(0, called.get()); + assertEquals(1, called1.get()); + } + try { + transportInterceptor.interceptHandler("foo/baz/boom", null, false, null); + } catch (Exception e) { + assertEquals(0, called.get()); + assertEquals(2, called1.get()); + } + } + + private NetworkModule newNetworkModule( + Settings settings, + List coreTransportInterceptors, + NetworkPlugin... plugins + ) { return new NetworkModule( settings, Arrays.asList(plugins), @@ -322,7 +502,8 @@ private NetworkModule newNetworkModule(Settings settings, NetworkPlugin... plugi null, new NullDispatcher(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - NoopTracer.INSTANCE + NoopTracer.INSTANCE, + coreTransportInterceptors ); } } diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java new file mode 100644 index 0000000000000..bac4eaf3fd677 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java @@ -0,0 +1,140 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CPUBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.settings.CPUBasedAdmissionControllerSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.List; + +public class AdmissionControlServiceTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + private AdmissionControlService admissionControlService; + private String action = ""; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + action = "indexing"; + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testWhenAdmissionControllerRegistered() { + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService.getClusterSettings(), threadPool); + assertEquals(admissionControlService.getAdmissionControllers().size(), 1); + } + + public void testRegisterInvalidAdmissionController() { + String test = "TEST"; + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService.getClusterSettings(), threadPool); + assertEquals(admissionControlService.getAdmissionControllers().size(), 1); + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> admissionControlService.registerAdmissionController(test) + ); + assertEquals(ex.getMessage(), "Not Supported AdmissionController : " + test); + } + + public void testAdmissionControllerSettings() { + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService.getClusterSettings(), threadPool); + AdmissionControlSettings admissionControlSettings = admissionControlService.admissionControlSettings; + List admissionControllerList = admissionControlService.getAdmissionControllers(); + assertEquals(admissionControllerList.size(), 1); + CPUBasedAdmissionController cpuBasedAdmissionController = (CPUBasedAdmissionController) admissionControlService + .getAdmissionController(CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER); + assertEquals( + admissionControlSettings.isTransportLayerAdmissionControlEnabled(), + cpuBasedAdmissionController.isEnabledForTransportLayer( + cpuBasedAdmissionController.settings.getTransportLayerAdmissionControllerMode() + ) + ); + + Settings settings = Settings.builder() + .put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.DISABLED.getMode()) + .build(); + clusterService.getClusterSettings().applySettings(settings); + assertEquals( + admissionControlSettings.isTransportLayerAdmissionControlEnabled(), + cpuBasedAdmissionController.isEnabledForTransportLayer( + cpuBasedAdmissionController.settings.getTransportLayerAdmissionControllerMode() + ) + ); + 
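        // The base transport-layer mode is now DISABLED; the CPU-based controller follows the
        // base mode until its own mode setting is applied explicitly below.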
assertFalse(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + + Settings newSettings = Settings.builder() + .put(settings) + .put( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + clusterService.getClusterSettings().applySettings(newSettings); + assertFalse(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + assertTrue( + cpuBasedAdmissionController.isEnabledForTransportLayer( + cpuBasedAdmissionController.settings.getTransportLayerAdmissionControllerMode() + ) + ); + } + + public void testApplyAdmissionControllerDisabled() { + this.action = "indices:data/write/bulk[s][p]"; + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService.getClusterSettings(), threadPool); + admissionControlService.applyTransportAdmissionControl(this.action); + List admissionControllerList = admissionControlService.getAdmissionControllers(); + admissionControllerList.forEach(admissionController -> { assertEquals(admissionController.getRejectionCount(), 0); }); + } + + public void testApplyAdmissionControllerEnabled() { + this.action = "indices:data/write/bulk[s][p]"; + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService.getClusterSettings(), threadPool); + admissionControlService.applyTransportAdmissionControl(this.action); + assertEquals( + admissionControlService.getAdmissionController(CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount(), + 0 + ); + + Settings settings = Settings.builder() + .put( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.MONITOR.getMode() + ) + .build(); + clusterService.getClusterSettings().applySettings(settings); + admissionControlService.applyTransportAdmissionControl(this.action); + List admissionControllerList = admissionControlService.getAdmissionControllers(); + assertEquals(admissionControllerList.size(), 1); + assertEquals( + admissionControlService.getAdmissionController(CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount(), + 1 + ); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java new file mode 100644 index 0000000000000..c11ee1cc608f6 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.List; +import java.util.Set; + +public class AdmissionControlSettingsTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testSettingsExists() { + Set> settings = ClusterSettings.BUILT_IN_CLUSTER_SETTINGS; + assertTrue( + "All the admission controller settings should be supported built in settings", + settings.containsAll(List.of(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE)) + ); + } + + public void testDefaultSettings() { + AdmissionControlSettings admissionControlSettings = new AdmissionControlSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + + assertFalse(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + assertFalse(admissionControlSettings.isTransportLayerAdmissionControlEnforced()); + assertEquals(admissionControlSettings.getAdmissionControlTransportLayerMode().getMode(), AdmissionControlSettings.Defaults.MODE); + } + + public void testGetConfiguredSettings() { + Settings settings = Settings.builder() + .put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED.getMode()) + .build(); + + AdmissionControlSettings admissionControlSettings = new AdmissionControlSettings(clusterService.getClusterSettings(), settings); + + assertTrue(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + assertTrue(admissionControlSettings.isTransportLayerAdmissionControlEnforced()); + } + + public void testUpdateAfterGetDefaultSettings() { + AdmissionControlSettings admissionControlSettings = new AdmissionControlSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + Settings settings = Settings.builder() + .put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.MONITOR.getMode()) + .build(); + clusterService.getClusterSettings().applySettings(settings); + assertTrue(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + assertFalse(admissionControlSettings.isTransportLayerAdmissionControlEnforced()); + } + + public void testUpdateAfterGetConfiguredSettings() { + Settings settings = Settings.builder() + .put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.MONITOR.getMode()) + .build(); + + AdmissionControlSettings admissionControlSettings = new AdmissionControlSettings(clusterService.getClusterSettings(), settings); + + Settings newSettings = Settings.builder() + .put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), 
AdmissionControlMode.ENFORCED.getMode()) + .build(); + + clusterService.getClusterSettings().applySettings(newSettings); + + assertTrue(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + assertTrue(admissionControlSettings.isTransportLayerAdmissionControlEnforced()); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionControllerTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionControllerTests.java new file mode 100644 index 0000000000000..af6ec0749e709 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionControllerTests.java @@ -0,0 +1,109 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.controllers; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.settings.CPUBasedAdmissionControllerSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +public class CPUBasedAdmissionControllerTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + CPUBasedAdmissionController admissionController = null; + + String action = "TEST_ACTION"; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testCheckDefaultParameters() { + admissionController = new CPUBasedAdmissionController( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER, + Settings.EMPTY, + clusterService.getClusterSettings() + ); + assertEquals(admissionController.getName(), CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER); + assertEquals(admissionController.getRejectionCount(), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); + assertFalse( + admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode()) + ); + } + + public void testCheckUpdateSettings() { + admissionController = new CPUBasedAdmissionController( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER, + Settings.EMPTY, + clusterService.getClusterSettings() + ); + Settings settings = Settings.builder() + .put( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + clusterService.getClusterSettings().applySettings(settings); + + assertEquals(admissionController.getName(), CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER); + assertEquals(admissionController.getRejectionCount(), 0); + 
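        // The dynamic settings update should switch the controller to ENFORCED mode without
        // changing its name or resetting its rejection count.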
assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertTrue(admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode())); + } + + public void testApplyControllerWithDefaultSettings() { + admissionController = new CPUBasedAdmissionController( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER, + Settings.EMPTY, + clusterService.getClusterSettings() + ); + assertEquals(admissionController.getRejectionCount(), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); + action = "indices:data/write/bulk[s][p]"; + admissionController.apply(action); + assertEquals(admissionController.getRejectionCount(), 0); + } + + public void testApplyControllerWhenSettingsEnabled() { + Settings settings = Settings.builder() + .put( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + admissionController = new CPUBasedAdmissionController( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER, + settings, + clusterService.getClusterSettings() + ); + assertTrue(admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode())); + assertEquals(admissionController.getRejectionCount(), 0); + action = "indices:data/write/bulk[s][p]"; + admissionController.apply(action); + assertEquals(admissionController.getRejectionCount(), 1); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlModeTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlModeTests.java new file mode 100644 index 0000000000000..98c0f3c7cf24c --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlModeTests.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.ratelimitting.admissioncontrol.enums;
+
+import org.opensearch.test.OpenSearchTestCase;
+
+public class AdmissionControlModeTests extends OpenSearchTestCase {
+
+    public void testValidActionType() {
+        assertEquals(AdmissionControlMode.DISABLED.getMode(), "disabled");
+        assertEquals(AdmissionControlMode.ENFORCED.getMode(), "enforced");
+        assertEquals(AdmissionControlMode.MONITOR.getMode(), "monitor_only");
+        assertEquals(AdmissionControlMode.fromName("disabled"), AdmissionControlMode.DISABLED);
+        assertEquals(AdmissionControlMode.fromName("enforced"), AdmissionControlMode.ENFORCED);
+        assertEquals(AdmissionControlMode.fromName("monitor_only"), AdmissionControlMode.MONITOR);
+    }
+
+    public void testInValidActionType() {
+        String name = "TEST";
+        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> AdmissionControlMode.fromName(name));
+        assertEquals(ex.getMessage(), "Invalid AdmissionControlMode: " + name);
+    }
+}
diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionTypeTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionTypeTests.java
new file mode 100644
index 0000000000000..02f582c26f54e
--- /dev/null
+++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionTypeTests.java
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ratelimitting.admissioncontrol.enums;
+
+import org.opensearch.test.OpenSearchTestCase;
+
+public class TransportActionTypeTests extends OpenSearchTestCase {
+
+    public void testValidActionType() {
+        assertEquals(TransportActionType.SEARCH.getType(), "search");
+        assertEquals(TransportActionType.INDEXING.getType(), "indexing");
+        assertEquals(TransportActionType.fromName("search"), TransportActionType.SEARCH);
+        assertEquals(TransportActionType.fromName("indexing"), TransportActionType.INDEXING);
+    }
+
+    public void testInValidActionType() {
+        String name = "test";
+        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> TransportActionType.fromName(name));
+        assertEquals(ex.getMessage(), "Not Supported TransportAction Type: " + name);
+    }
+}
diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControlSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControlSettingsTests.java
new file mode 100644
index 0000000000000..43103926a69a2
--- /dev/null
+++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControlSettingsTests.java
@@ -0,0 +1,153 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.ratelimitting.admissioncontrol.settings; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Arrays; +import java.util.Set; + +public class CPUBasedAdmissionControlSettingsTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testSettingsExists() { + Set> settings = ClusterSettings.BUILT_IN_CLUSTER_SETTINGS; + assertTrue( + "All the cpu based admission controller settings should be supported built in settings", + settings.containsAll( + Arrays.asList( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, + CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, + CPUBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT + ) + ) + ); + } + + public void testDefaultSettings() { + CPUBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CPUBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + long percent = 95; + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), percent); + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), percent); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportActionsList(), Arrays.asList("indexing", "search")); + } + + public void testGetConfiguredSettings() { + long percent = 95; + long indexingPercent = 85; + Settings settings = Settings.builder() + .put( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .put(CPUBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT.getKey(), indexingPercent) + .build(); + + CPUBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CPUBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + settings + ); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), percent); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), indexingPercent); + } + + public void testUpdateAfterGetDefaultSettings() { + long percent = 95; + long searchPercent = 80; + CPUBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CPUBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + Settings settings = Settings.builder() + .put( + 
CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .put(CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT.getKey(), searchPercent) + .build(); + clusterService.getClusterSettings().applySettings(settings); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), searchPercent); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), percent); + } + + public void testUpdateAfterGetConfiguredSettings() { + long percent = 95; + long indexingPercent = 85; + long searchPercent = 80; + Settings settings = Settings.builder() + .put( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .put(CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT.getKey(), searchPercent) + .build(); + + CPUBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CPUBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + settings + ); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), searchPercent); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), percent); + + Settings updatedSettings = Settings.builder() + .put( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.MONITOR.getMode() + ) + .put(CPUBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT.getKey(), indexingPercent) + .build(); + clusterService.getClusterSettings().applySettings(updatedSettings); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.MONITOR); + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), searchPercent); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), indexingPercent); + + searchPercent = 70; + + updatedSettings = Settings.builder() + .put(updatedSettings) + .put(CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT.getKey(), searchPercent) + .build(); + clusterService.getClusterSettings().applySettings(updatedSettings); + + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), searchPercent); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), indexingPercent); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandlerTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandlerTests.java new file mode 100644 index 0000000000000..03d4819a94045 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandlerTests.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol.transport; + +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; +import org.opensearch.tasks.Task; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestHandler; + +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; + +public class AdmissionControlTransportHandlerTests extends OpenSearchTestCase { + AdmissionControlTransportHandler admissionControlTransportHandler; + + public void testHandlerInvoked() throws Exception { + String action = "TEST"; + InterceptingRequestHandler handler = new InterceptingRequestHandler<>(action); + admissionControlTransportHandler = new AdmissionControlTransportHandler( + action, + handler, + mock(AdmissionControlService.class), + false + ); + admissionControlTransportHandler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + assertEquals(1, handler.count); + } + + public void testHandlerInvokedRejectedException() throws Exception { + String action = "TEST"; + AdmissionControlService admissionControlService = mock(AdmissionControlService.class); + doThrow(new OpenSearchRejectedExecutionException()).when(admissionControlService).applyTransportAdmissionControl(action); + InterceptingRequestHandler handler = new InterceptingRequestHandler<>(action); + admissionControlTransportHandler = new AdmissionControlTransportHandler( + action, + handler, + admissionControlService, + false + ); + try { + admissionControlTransportHandler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + } catch (OpenSearchRejectedExecutionException exception) { + assertEquals(0, handler.count); + handler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + } + assertEquals(1, handler.count); + } + + public void testHandlerInvokedRandomException() throws Exception { + String action = "TEST"; + AdmissionControlService admissionControlService = mock(AdmissionControlService.class); + doThrow(new NullPointerException()).when(admissionControlService).applyTransportAdmissionControl(action); + InterceptingRequestHandler handler = new InterceptingRequestHandler<>(action); + admissionControlTransportHandler = new AdmissionControlTransportHandler( + action, + handler, + admissionControlService, + false + ); + try { + admissionControlTransportHandler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + } catch (Exception exception) { + assertEquals(0, handler.count); + handler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + } + assertEquals(1, handler.count); + } + + private class InterceptingRequestHandler implements TransportRequestHandler { + private final String action; + public int count; + + public InterceptingRequestHandler(String action) { + this.action = action; + this.count = 0; + } + + @Override + public void messageReceived(T request, TransportChannel channel, Task task) throws Exception { + this.count = this.count + 1; + } + } +} From 14d4a6389bd5a7612c14e2e6fcb5a39822af9ee0 Mon Sep 17 00:00:00 2001 From: Ashish Date: Sat, 21 Oct 2023 18:42:52 +0530 Subject: [PATCH 063/178] [Remote Store] Use time elapsed since last successful 
local refresh for refresh lag (#10803) * [Remote Store] Use time elapsed since last successful local refresh for time lag Signed-off-by: Ashish Singh * Incorporate PR review comments Signed-off-by: Ashish Singh --------- Signed-off-by: Ashish Singh --- ...emoteStoreBackpressureAndResiliencyIT.java | 6 ++- .../remote/RemoteSegmentTransferTracker.java | 53 ++++++++++++------- .../remote/RemoteStorePressureService.java | 1 - .../RemoteSegmentTransferTrackerTests.java | 39 +++++++++----- .../RemoteStorePressureServiceTests.java | 32 ++++++++--- 5 files changed, 86 insertions(+), 45 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java index 98586b60dcc69..f19c9db7874db 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java @@ -56,7 +56,7 @@ public void testWritesRejectedDueToBytesLagBreach() throws Exception { public void testWritesRejectedDueToTimeLagBreach() throws Exception { // Initially indexing happens with doc size of 1KB, then all remote store interactions start failing. Now, the // indexing happens with doc size of 1 byte leading to time lag limit getting exceeded and leading to rejections. - validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 20, ByteSizeUnit.BYTES.toIntBytes(1), 15, "time_lag"); + validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 20, ByteSizeUnit.BYTES.toIntBytes(1), 3, "time_lag"); } private void validateBackpressure( @@ -133,11 +133,13 @@ private RemoteSegmentTransferTracker.Stats stats() { return matches.get(0).getSegmentStats(); } - private void indexDocAndRefresh(BytesReference source, int iterations) { + private void indexDocAndRefresh(BytesReference source, int iterations) throws InterruptedException { for (int i = 0; i < iterations; i++) { client().prepareIndex(INDEX_NAME).setSource(source, MediaTypeRegistry.JSON).get(); refresh(INDEX_NAME); } + Thread.sleep(250); + client().prepareIndex(INDEX_NAME).setSource(source, MediaTypeRegistry.JSON).get(); } /** diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java index 2a703f17aa953..fb65d9ef83be2 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java @@ -27,6 +27,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -66,6 +67,12 @@ public class RemoteSegmentTransferTracker extends RemoteTransferTracker { */ private volatile long remoteRefreshTimeMs; + /** + * This is the time of first local refresh after the last successful remote refresh. When the remote store is in + * sync with local refresh, this will be reset to -1. + */ + private volatile long remoteRefreshStartTimeMs = -1; + /** * The refresh time(clock) of most recent remote refresh. 
*/ @@ -76,11 +83,6 @@ public class RemoteSegmentTransferTracker extends RemoteTransferTracker { */ private volatile long refreshSeqNoLag; - /** - * Keeps the time (ms) lag computed so that we do not compute it for every request. - */ - private volatile long timeMsLag; - /** * Keeps track of the total bytes of segment files which were uploaded to remote store during last successful remote refresh */ @@ -132,14 +134,19 @@ public RemoteSegmentTransferTracker( logger = Loggers.getLogger(getClass(), shardId); // Both the local refresh time and remote refresh time are set with current time to give consistent view of time lag when it arises. long currentClockTimeMs = System.currentTimeMillis(); - long currentTimeMs = System.nanoTime() / 1_000_000L; + long currentTimeMs = currentTimeMsUsingSystemNanos(); localRefreshTimeMs = currentTimeMs; remoteRefreshTimeMs = currentTimeMs; + remoteRefreshStartTimeMs = currentTimeMs; localRefreshClockTimeMs = currentClockTimeMs; remoteRefreshClockTimeMs = currentClockTimeMs; this.directoryFileTransferTracker = directoryFileTransferTracker; } + public static long currentTimeMsUsingSystemNanos() { + return TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); + } + @Override public void incrementTotalUploadsFailed() { super.incrementTotalUploadsFailed(); @@ -180,19 +187,22 @@ public long getLocalRefreshClockTimeMs() { */ public void updateLocalRefreshTimeAndSeqNo() { updateLocalRefreshClockTimeMs(System.currentTimeMillis()); - updateLocalRefreshTimeMs(System.nanoTime() / 1_000_000L); + updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos()); updateLocalRefreshSeqNo(getLocalRefreshSeqNo() + 1); } // Visible for testing - void updateLocalRefreshTimeMs(long localRefreshTimeMs) { + synchronized void updateLocalRefreshTimeMs(long localRefreshTimeMs) { assert localRefreshTimeMs >= this.localRefreshTimeMs : "newLocalRefreshTimeMs=" + localRefreshTimeMs + " < " + "currentLocalRefreshTimeMs=" + this.localRefreshTimeMs; + boolean isRemoteInSyncBeforeLocalRefresh = this.localRefreshTimeMs == this.remoteRefreshTimeMs; this.localRefreshTimeMs = localRefreshTimeMs; - computeTimeMsLag(); + if (isRemoteInSyncBeforeLocalRefresh) { + this.remoteRefreshStartTimeMs = localRefreshTimeMs; + } } private void updateLocalRefreshClockTimeMs(long localRefreshClockTimeMs) { @@ -221,14 +231,18 @@ long getRemoteRefreshClockTimeMs() { return remoteRefreshClockTimeMs; } - public void updateRemoteRefreshTimeMs(long remoteRefreshTimeMs) { - assert remoteRefreshTimeMs >= this.remoteRefreshTimeMs : "newRemoteRefreshTimeMs=" - + remoteRefreshTimeMs + public synchronized void updateRemoteRefreshTimeMs(long refreshTimeMs) { + assert refreshTimeMs >= this.remoteRefreshTimeMs : "newRemoteRefreshTimeMs=" + + refreshTimeMs + " < " + "currentRemoteRefreshTimeMs=" + this.remoteRefreshTimeMs; - this.remoteRefreshTimeMs = remoteRefreshTimeMs; - computeTimeMsLag(); + this.remoteRefreshTimeMs = refreshTimeMs; + // When multiple refreshes have failed, there is a possibility that retry is ongoing while another refresh gets + // triggered. After the segments have been uploaded and before the below code runs, the updateLocalRefreshTimeAndSeqNo + // method is triggered, which will update the local localRefreshTimeMs. Now, the lag would basically become the + // time since the last refresh happened locally. + this.remoteRefreshStartTimeMs = refreshTimeMs == this.localRefreshTimeMs ? 
-1 : this.localRefreshTimeMs; } public void updateRemoteRefreshClockTimeMs(long remoteRefreshClockTimeMs) { @@ -243,12 +257,11 @@ public long getRefreshSeqNoLag() { return refreshSeqNoLag; } - private void computeTimeMsLag() { - timeMsLag = localRefreshTimeMs - remoteRefreshTimeMs; - } - public long getTimeMsLag() { - return timeMsLag; + if (remoteRefreshTimeMs == localRefreshTimeMs) { + return 0; + } + return currentTimeMsUsingSystemNanos() - remoteRefreshStartTimeMs; } public long getBytesLag() { @@ -354,7 +367,7 @@ public RemoteSegmentTransferTracker.Stats stats() { shardId, localRefreshClockTimeMs, remoteRefreshClockTimeMs, - timeMsLag, + getTimeMsLag(), localRefreshSeqNo, remoteRefreshSeqNo, uploadBytesStarted.get(), diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java index 2920b33921869..33cd40f802d43 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java @@ -180,7 +180,6 @@ public boolean validate(RemoteSegmentTransferTracker pressureTracker, ShardId sh return true; } if (pressureTracker.isUploadTimeMovingAverageReady() == false) { - logger.trace("upload time moving average is not ready"); return true; } long timeLag = pressureTracker.getTimeMsLag(); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java index 0bf00f9e48137..c87cdfcc8f1a1 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java @@ -23,6 +23,8 @@ import java.util.HashMap; import java.util.Map; +import static org.opensearch.index.remote.RemoteSegmentTransferTracker.currentTimeMsUsingSystemNanos; + public class RemoteSegmentTransferTrackerTests extends OpenSearchTestCase { private RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; private ClusterService clusterService; @@ -92,7 +94,7 @@ public void testUpdateLocalRefreshTimeMs() { directoryFileTransferTracker, remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() ); - long refreshTimeMs = System.nanoTime() / 1_000_000L + randomIntBetween(10, 100); + long refreshTimeMs = currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100); transferTracker.updateLocalRefreshTimeMs(refreshTimeMs); assertEquals(refreshTimeMs, transferTracker.getLocalRefreshTimeMs()); } @@ -103,7 +105,7 @@ public void testUpdateRemoteRefreshTimeMs() { directoryFileTransferTracker, remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() ); - long refreshTimeMs = System.nanoTime() / 1_000_000 + randomIntBetween(10, 100); + long refreshTimeMs = currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100); transferTracker.updateRemoteRefreshTimeMs(refreshTimeMs); assertEquals(refreshTimeMs, transferTracker.getRemoteRefreshTimeMs()); } @@ -133,20 +135,29 @@ public void testComputeSeqNoLagOnUpdate() { assertEquals(localRefreshSeqNo - remoteRefreshSeqNo, transferTracker.getRefreshSeqNoLag()); } - public void testComputeTimeLagOnUpdate() { + public void testComputeTimeLagOnUpdate() throws InterruptedException { transferTracker = new RemoteSegmentTransferTracker( shardId, directoryFileTransferTracker, remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() ); - long 
currentLocalRefreshTimeMs = transferTracker.getLocalRefreshTimeMs(); - long currentTimeMs = System.nanoTime() / 1_000_000L; - long localRefreshTimeMs = currentTimeMs + randomIntBetween(100, 500); - long remoteRefreshTimeMs = currentTimeMs + randomIntBetween(50, 99); - transferTracker.updateLocalRefreshTimeMs(localRefreshTimeMs); - assertEquals(localRefreshTimeMs - currentLocalRefreshTimeMs, transferTracker.getTimeMsLag()); - transferTracker.updateRemoteRefreshTimeMs(remoteRefreshTimeMs); - assertEquals(localRefreshTimeMs - remoteRefreshTimeMs, transferTracker.getTimeMsLag()); + + // No lag if there is a remote upload corresponding to a local refresh + assertEquals(0, transferTracker.getTimeMsLag()); + + // Set a local refresh time that is higher than remote refresh time + Thread.sleep(1); + transferTracker.updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos()); + + // Sleep for 100ms and then the lag should be within 100ms +/- 20ms + Thread.sleep(100); + assertTrue(Math.abs(transferTracker.getTimeMsLag() - 100) <= 20); + + transferTracker.updateRemoteRefreshTimeMs(transferTracker.getLocalRefreshTimeMs()); + transferTracker.updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos()); + long random = randomIntBetween(50, 200); + Thread.sleep(random); + assertTrue(Math.abs(transferTracker.getTimeMsLag() - random) <= 20); } public void testAddUploadBytesStarted() { @@ -519,7 +530,7 @@ public void testStatsObjectCreation() { transferTracker = constructTracker(); RemoteSegmentTransferTracker.Stats transferTrackerStats = transferTracker.stats(); assertEquals(transferTracker.getShardId(), transferTrackerStats.shardId); - assertEquals(transferTracker.getTimeMsLag(), (int) transferTrackerStats.refreshTimeLagMs); + assertTrue(Math.abs(transferTracker.getTimeMsLag() - transferTrackerStats.refreshTimeLagMs) <= 20); assertEquals(transferTracker.getLocalRefreshSeqNo(), (int) transferTrackerStats.localRefreshNumber); assertEquals(transferTracker.getRemoteRefreshSeqNo(), (int) transferTrackerStats.remoteRefreshNumber); assertEquals(transferTracker.getBytesLag(), (int) transferTrackerStats.bytesLag); @@ -591,9 +602,9 @@ private RemoteSegmentTransferTracker constructTracker() { ); transferTracker.incrementTotalUploadsStarted(); transferTracker.incrementTotalUploadsFailed(); - transferTracker.updateUploadTimeMovingAverage(System.nanoTime() / 1_000_000L + randomIntBetween(10, 100)); + transferTracker.updateUploadTimeMovingAverage(currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100)); transferTracker.updateUploadBytesMovingAverage(99); - transferTracker.updateRemoteRefreshTimeMs(System.nanoTime() / 1_000_000L + randomIntBetween(10, 100)); + transferTracker.updateRemoteRefreshTimeMs(currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100)); transferTracker.incrementRejectionCount(); transferTracker.getDirectoryFileTransferTracker().addTransferredBytesStarted(10); transferTracker.getDirectoryFileTransferTracker().addTransferredBytesSucceeded(10, System.currentTimeMillis()); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java index de610083f3327..cb77174e612fd 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java @@ -21,8 +21,11 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; +import 
java.util.regex.Matcher; +import java.util.regex.Pattern; import java.util.stream.IntStream; +import static org.opensearch.index.remote.RemoteSegmentTransferTracker.currentTimeMsUsingSystemNanos; import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexShard; public class RemoteStorePressureServiceTests extends OpenSearchTestCase { @@ -68,7 +71,7 @@ public void testIsSegmentsUploadBackpressureEnabled() { assertTrue(pressureService.isSegmentsUploadBackpressureEnabled()); } - public void testValidateSegmentUploadLag() { + public void testValidateSegmentUploadLag() throws InterruptedException { // Create the pressure tracker IndexShard indexShard = createIndexShard(shardId, true); remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, Settings.EMPTY); @@ -86,14 +89,27 @@ public void testValidateSegmentUploadLag() { sum.addAndGet(i); }); double avg = (double) sum.get() / 20; - long currentMs = System.nanoTime() / 1_000_000; - pressureTracker.updateLocalRefreshTimeMs((long) (currentMs + 12 * avg)); - pressureTracker.updateRemoteRefreshTimeMs(currentMs); - Exception e = assertThrows(OpenSearchRejectedExecutionException.class, () -> pressureService.validateSegmentsUploadLag(shardId)); - assertTrue(e.getMessage().contains("due to remote segments lagging behind local segments")); - assertTrue(e.getMessage().contains("time_lag:114 ms dynamic_time_lag_threshold:95.0 ms")); - pressureTracker.updateRemoteRefreshTimeMs((long) (currentMs + 2 * avg)); + // We run this to ensure that the local and remote refresh time are not same anymore + while (pressureTracker.getLocalRefreshTimeMs() == currentTimeMsUsingSystemNanos()) { + Thread.sleep(10); + } + long localRefreshTimeMs = currentTimeMsUsingSystemNanos(); + pressureTracker.updateLocalRefreshTimeMs(localRefreshTimeMs); + + while (currentTimeMsUsingSystemNanos() - localRefreshTimeMs <= 20 * avg) { + Thread.sleep((long) (4 * avg)); + } + Exception e = assertThrows(OpenSearchRejectedExecutionException.class, () -> pressureService.validateSegmentsUploadLag(shardId)); + String regex = "^rejected execution on primary shard:\\[index]\\[0] due to remote segments lagging behind " + + "local segments.time_lag:[0-9]{2,3} ms dynamic_time_lag_threshold:95\\.0 ms$"; + Pattern pattern = Pattern.compile(regex); + Matcher matcher = pattern.matcher(e.getMessage()); + assertTrue(matcher.matches()); + + pressureTracker.updateRemoteRefreshTimeMs(pressureTracker.getLocalRefreshTimeMs()); + pressureTracker.updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos()); + Thread.sleep((long) (2 * avg)); pressureService.validateSegmentsUploadLag(shardId); // 2. 
bytes lag more than dynamic threshold From 911afc4002b3a9faa86e109871332af0caaa73d0 Mon Sep 17 00:00:00 2001 From: Dhwanil Patel Date: Sun, 22 Oct 2023 11:31:31 +0530 Subject: [PATCH 064/178] Remove remote store attributes from DiscoveryNode toString() (#10810) Signed-off-by: Dhwanil Patel --- .../cluster/node/DiscoveryNode.java | 8 +++++++- .../cluster/node/DiscoveryNodeTests.java | 19 +++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 4e49b25eb5789..0c58aabf95207 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -553,7 +553,13 @@ public String toString() { sb.append('}'); } if (!attributes.isEmpty()) { - sb.append(attributes); + sb.append( + attributes.entrySet() + .stream() + .filter(entry -> !entry.getKey().startsWith(REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX)) // filter remote_store attributes + // from logging to reduce noise. + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) + ); } return sb.toString(); } diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index 8b61e8f6d724d..c8a6fc76ce820 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -38,13 +38,16 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.test.NodeRoles; import org.opensearch.test.OpenSearchTestCase; import java.net.InetAddress; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.Locale; +import java.util.Map; import java.util.Set; import java.util.stream.Collectors; @@ -81,6 +84,22 @@ public void testRolesAreSorted() { } + public void testRemoteStoreRedactionInToString() { + final Set roles = new HashSet<>(randomSubsetOf(DiscoveryNodeRole.BUILT_IN_ROLES)); + Map attributes = new HashMap<>(); + attributes.put(RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test-repo"); + attributes.put(RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, "test-repo"); + final DiscoveryNode node = new DiscoveryNode( + "name", + "id", + new TransportAddress(TransportAddress.META_ADDRESS, 9200), + attributes, + roles, + Version.CURRENT + ); + assertFalse(node.toString().contains(RemoteStoreNodeAttribute.REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX)); + } + public void testDiscoveryNodeIsCreatedWithHostFromInetAddress() throws Exception { InetAddress inetAddress = randomBoolean() ? 
InetAddress.getByName("192.0.2.1") From 79c6e322f584e72b48793658f6a3456d1eea1fc2 Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Sun, 22 Oct 2023 11:54:10 +0530 Subject: [PATCH 065/178] Adds util to compare Metadata for remote state (#10661) * Adds util to compare Metadata for remote state during the restore flow, when identifying whether two clusterUUIDs have the same cluster state Signed-off-by: bansvaru --- .../opensearch/cluster/metadata/Metadata.java | 17 +- .../remote/RemoteClusterStateService.java | 12 +- .../cluster/metadata/MetadataTests.java | 33 ++++ .../RemoteClusterStateServiceTests.java | 163 ++++++++++++------ 4 files changed, 168 insertions(+), 57 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 626903877b0c6..70c1d059a1b9e 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -922,19 +922,26 @@ public static boolean isGlobalStateEquals(Metadata metadata1, Metadata metadata2 if (!metadata1.coordinationMetadata.equals(metadata2.coordinationMetadata)) { return false; } - if (!metadata1.persistentSettings.equals(metadata2.persistentSettings)) { + if (!metadata1.hashesOfConsistentSettings.equals(metadata2.hashesOfConsistentSettings)) { return false; } - if (!metadata1.hashesOfConsistentSettings.equals(metadata2.hashesOfConsistentSettings)) { + if (!metadata1.clusterUUID.equals(metadata2.clusterUUID)) { return false; } - if (!metadata1.templates.equals(metadata2.templates())) { + if (metadata1.clusterUUIDCommitted != metadata2.clusterUUIDCommitted) { return false; } - if (!metadata1.clusterUUID.equals(metadata2.clusterUUID)) { + return isGlobalResourcesMetadataEquals(metadata1, metadata2); + } + + /** + * Compares Metadata entities persisted in Remote Store.
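+ * Used during the restore flow to decide whether two cluster UUIDs carry the same cluster state. Cluster UUID,
+ * coordination metadata, the clusterUUIDCommitted flag and hashes of consistent settings are deliberately left out
+ * of this comparison (those are checked in isGlobalStateEquals above); only the persisted resources, namely
+ * persistent settings and templates, are compared below.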
+ */ + public static boolean isGlobalResourcesMetadataEquals(Metadata metadata1, Metadata metadata2) { + if (!metadata1.persistentSettings.equals(metadata2.persistentSettings)) { return false; } - if (metadata1.clusterUUIDCommitted != metadata2.clusterUUIDCommitted) { + if (!metadata1.templates.equals(metadata2.templates())) { return false; } // Check if any persistent metadata needs to be saved diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 96ce2fc779ea0..13fbda77d8a2d 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -689,10 +689,11 @@ private Map getIndexMetadataMap( * @return {@link IndexMetadata} */ private IndexMetadata getIndexMetadata(String clusterName, String clusterUUID, UploadedIndexMetadata uploadedIndexMetadata) { + BlobContainer blobContainer = indexMetadataContainer(clusterName, clusterUUID, uploadedIndexMetadata.getIndexUUID()); try { String[] splitPath = uploadedIndexMetadata.getUploadedFilename().split("/"); return INDEX_METADATA_FORMAT.read( - indexMetadataContainer(clusterName, clusterUUID, uploadedIndexMetadata.getIndexUUID()), + blobContainer, splitPath[splitPath.length - 1], blobStoreRepository.getNamedXContentRegistry() ); @@ -882,7 +883,8 @@ private Map trimClusterUUIDs( } } else { ClusterMetadataManifest previousManifest = trimmedUUIDs.get(currentManifest.getPreviousClusterUUID()); - if (isMetadataEqual(currentManifest, previousManifest, clusterName)) { + if (isMetadataEqual(currentManifest, previousManifest, clusterName) + && isGlobalMetadataEqual(currentManifest, previousManifest, clusterName)) { trimmedUUIDs.remove(clusterUUID); } } @@ -912,6 +914,12 @@ private boolean isMetadataEqual(ClusterMetadataManifest first, ClusterMetadataMa return true; } + private boolean isGlobalMetadataEqual(ClusterMetadataManifest first, ClusterMetadataManifest second, String clusterName) { + Metadata secondGlobalMetadata = getGlobalMetadata(clusterName, second.getClusterUUID(), second); + Metadata firstGlobalMetadata = getGlobalMetadata(clusterName, first.getClusterUUID(), first); + return Metadata.isGlobalResourcesMetadataEquals(firstGlobalMetadata, secondGlobalMetadata); + } + private boolean isInvalidClusterUUID(ClusterMetadataManifest manifest) { return !manifest.isClusterUUIDCommitted(); } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java index 40eefa6cdbf03..618fcb923bc60 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java @@ -627,6 +627,39 @@ public void testGlobalStateEqualsCoordinationMetadata() { assertFalse(Metadata.isGlobalStateEquals(metadata1, metadata2)); } + public void testGlobalResourcesStateEqualsCoordinationMetadata() { + CoordinationMetadata coordinationMetadata1 = new CoordinationMetadata( + randomNonNegativeLong(), + randomVotingConfig(), + randomVotingConfig(), + randomVotingConfigExclusions() + ); + Metadata metadata1 = Metadata.builder() + .coordinationMetadata(coordinationMetadata1) + .clusterUUID(randomAlphaOfLength(10)) + .clusterUUIDCommitted(false) + .hashesOfConsistentSettings(Map.of("a", "b")) + 
.persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); + CoordinationMetadata coordinationMetadata2 = new CoordinationMetadata( + randomNonNegativeLong(), + randomVotingConfig(), + randomVotingConfig(), + randomVotingConfigExclusions() + ); + Metadata metadata2 = Metadata.builder() + .coordinationMetadata(coordinationMetadata2) + .clusterUUIDCommitted(true) + .clusterUUID(randomAlphaOfLength(11)) + .hashesOfConsistentSettings(Map.of("b", "a")) + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); + + assertTrue(Metadata.isGlobalStateEquals(metadata1, metadata1)); + assertFalse(Metadata.isGlobalStateEquals(metadata1, metadata2)); + assertTrue(Metadata.isGlobalResourcesMetadataEquals(metadata1, metadata2)); + } + public void testSerializationWithIndexGraveyard() throws IOException { final IndexGraveyard graveyard = IndexGraveyardTests.createRandom(); final Metadata originalMeta = Metadata.builder().indexGraveyard(graveyard).build(); diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 5202f31c514ed..55094400581b4 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -9,6 +9,7 @@ package org.opensearch.gateway.remote; import org.opensearch.Version; +import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata; @@ -27,6 +28,7 @@ import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; import org.opensearch.common.compress.DeflateCompressor; import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.ParseField; @@ -37,6 +39,7 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.indices.IndicesModule; import org.opensearch.repositories.FilterRepository; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryMissingException; @@ -65,11 +68,14 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import java.util.function.Supplier; +import java.util.stream.Stream; import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatchers; +import static java.util.stream.Collectors.toList; import static org.opensearch.gateway.remote.RemoteClusterStateService.DELIMITER; import static org.opensearch.gateway.remote.RemoteClusterStateService.FORMAT_PARAMS; import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_METADATA_CURRENT_CODEC_VERSION; @@ -126,11 +132,19 @@ public void setup() { .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) .build(); + NamedXContentRegistry xContentRegistry = new NamedXContentRegistry( + Stream.of( + NetworkModule.getNamedXContents().stream(), + 
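+ // a populated registry (rather than the previously empty one) is presumably needed here so the test can
+ // deserialize global Metadata, including customs, read back from the mocked remote blobs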
IndicesModule.getNamedXContents().stream(), + ClusterModule.getNamedXWriteables().stream() + ).flatMap(Function.identity()).collect(toList()) + ); + blobStoreRepository = mock(BlobStoreRepository.class); blobStore = mock(BlobStore.class); when(blobStoreRepository.blobStore()).thenReturn(blobStore); when(repositoriesService.repository("remote_store_repository")).thenReturn(blobStoreRepository); - when(blobStoreRepository.getNamedXContentRegistry()).thenReturn(new NamedXContentRegistry(new ArrayList<>())); + when(blobStoreRepository.getNamedXContentRegistry()).thenReturn(xContentRegistry); remoteClusterStateService = new RemoteClusterStateService( "test-node-id", repositoriesServiceSupplier, @@ -887,7 +901,7 @@ public void testGetValidPreviousClusterUUIDWithMultipleChains() throws IOExcepti "cluster-uuid3", "cluster-uuid1" ); - mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, randomBoolean()); remoteClusterStateService.start(); String previousClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster"); @@ -1040,6 +1054,11 @@ public void testSingleConcurrentExecutionOfStaleManifestCleanup() throws Excepti } private void mockObjectsForGettingPreviousClusterUUID(Map clusterUUIDsPointers) throws IOException { + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, false); + } + + private void mockObjectsForGettingPreviousClusterUUID(Map clusterUUIDsPointers, boolean differGlobalMetadata) + throws IOException { final BlobPath blobPath = mock(BlobPath.class); when((blobStoreRepository.basePath())).thenReturn(blobPath); when(blobPath.add(anyString())).thenReturn(blobPath); @@ -1061,7 +1080,8 @@ private void mockObjectsForGettingPreviousClusterUUID(Map cluste "cluster-uuid1", clusterUUIDsPointers.get("cluster-uuid1"), randomAlphaOfLength(10), - uploadedIndexMetadataList1 + uploadedIndexMetadataList1, + "test-metadata1" ); Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexMetadata indexMetadata1 = IndexMetadata.builder("index1") @@ -1074,8 +1094,12 @@ private void mockObjectsForGettingPreviousClusterUUID(Map cluste .numberOfShards(1) .numberOfReplicas(1) .build(); + Metadata metadata1 = Metadata.builder() + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); Map indexMetadataMap1 = Map.of("index-uuid1", indexMetadata1, "index-uuid2", indexMetadata2); - mockBlobContainer(blobContainer1, clusterManifest1, indexMetadataMap1); + mockBlobContainerForGlobalMetadata(blobContainer1, clusterManifest1, metadata1); + mockBlobContainer(blobContainer1, clusterManifest1, indexMetadataMap1, ClusterMetadataManifest.CODEC_V1); List uploadedIndexMetadataList2 = List.of( new UploadedIndexMetadata("index1", "index-uuid1", "key1"), @@ -1085,7 +1109,8 @@ private void mockObjectsForGettingPreviousClusterUUID(Map cluste "cluster-uuid2", clusterUUIDsPointers.get("cluster-uuid2"), randomAlphaOfLength(10), - uploadedIndexMetadataList2 + uploadedIndexMetadataList2, + "test-metadata2" ); IndexMetadata indexMetadata3 = IndexMetadata.builder("index1") .settings(indexSettings) @@ -1097,37 +1122,59 @@ private void mockObjectsForGettingPreviousClusterUUID(Map cluste .numberOfShards(1) .numberOfReplicas(1) .build(); + Metadata metadata2 = Metadata.builder() + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); Map indexMetadataMap2 = 
Map.of("index-uuid1", indexMetadata3, "index-uuid2", indexMetadata4); - mockBlobContainer(blobContainer2, clusterManifest2, indexMetadataMap2); + mockBlobContainerForGlobalMetadata(blobContainer2, clusterManifest2, metadata2); + mockBlobContainer(blobContainer2, clusterManifest2, indexMetadataMap2, ClusterMetadataManifest.CODEC_V1); + + // differGlobalMetadata controls which one of IndexMetadata or Metadata object would be different + // when comparing cluster-uuid3 and cluster-uuid1 state. + // if set true, only Metadata will differ b/w cluster uuid1 and cluster uuid3. + // If set to false, only IndexMetadata would be different + // Adding difference in EXACTLY on of these randomly will help us test if our uuid trimming logic compares both + // IndexMetadata and Metadata when deciding if the remote state b/w two different cluster uuids is same. + List uploadedIndexMetadataList3 = differGlobalMetadata + ? new ArrayList<>(uploadedIndexMetadataList1) + : List.of(new UploadedIndexMetadata("index1", "index-uuid1", "key1")); + IndexMetadata indexMetadata5 = IndexMetadata.builder("index1") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + Map indexMetadataMap3 = differGlobalMetadata + ? new HashMap<>(indexMetadataMap1) + : Map.of("index-uuid1", indexMetadata5); + Metadata metadata3 = Metadata.builder() + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), !differGlobalMetadata).build()) + .build(); - List uploadedIndexMetadataList3 = List.of(new UploadedIndexMetadata("index1", "index-uuid1", "key1")); final ClusterMetadataManifest clusterManifest3 = generateClusterMetadataManifest( "cluster-uuid3", clusterUUIDsPointers.get("cluster-uuid3"), randomAlphaOfLength(10), - uploadedIndexMetadataList3 + uploadedIndexMetadataList3, + "test-metadata3" ); - IndexMetadata indexMetadata5 = IndexMetadata.builder("index1") - .settings(indexSettings) - .numberOfShards(1) - .numberOfReplicas(1) - .build(); - Map indexMetadataMap3 = Map.of("index-uuid1", indexMetadata5); - mockBlobContainer(blobContainer3, clusterManifest3, indexMetadataMap3); - - when(blobStore.blobContainer(ArgumentMatchers.any())).thenReturn( - uuidBlobContainer, - blobContainer1, - blobContainer1, - blobContainer3, - blobContainer3, - blobContainer2, - blobContainer2, - blobContainer1, - blobContainer2, - blobContainer1, - blobContainer2 + mockBlobContainerForGlobalMetadata(blobContainer3, clusterManifest3, metadata3); + mockBlobContainer(blobContainer3, clusterManifest3, indexMetadataMap3, ClusterMetadataManifest.CODEC_V1); + + ArrayList mockBlobContainerOrderedList = new ArrayList<>( + List.of(blobContainer1, blobContainer1, blobContainer3, blobContainer3, blobContainer2, blobContainer2) ); + + if (differGlobalMetadata) { + mockBlobContainerOrderedList.addAll( + List.of(blobContainer3, blobContainer1, blobContainer3, blobContainer1, blobContainer1, blobContainer3) + ); + } + mockBlobContainerOrderedList.addAll( + List.of(blobContainer2, blobContainer1, blobContainer2, blobContainer1, blobContainer1, blobContainer2) + ); + BlobContainer[] mockBlobContainerOrderedArray = new BlobContainer[mockBlobContainerOrderedList.size()]; + mockBlobContainerOrderedList.toArray(mockBlobContainerOrderedArray); + when(blobStore.blobContainer(ArgumentMatchers.any())).thenReturn(uuidBlobContainer, mockBlobContainerOrderedArray); when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor()); } @@ -1135,7 +1182,8 @@ private ClusterMetadataManifest 
generateClusterMetadataManifest( String clusterUUID, String previousClusterUUID, String stateUUID, - List uploadedIndexMetadata + List uploadedIndexMetadata, + String globalMetadataFileName ) { return ClusterMetadataManifest.builder() .indices(uploadedIndexMetadata) @@ -1148,7 +1196,8 @@ private ClusterMetadataManifest generateClusterMetadataManifest( .previousClusterUUID(previousClusterUUID) .committed(true) .clusterUUIDCommitted(true) - .globalMetadataFileName("test-global-metadata") + .globalMetadataFileName(globalMetadataFileName) + .codecVersion(ClusterMetadataManifest.CODEC_V1) .build(); } @@ -1181,17 +1230,29 @@ private void mockBlobContainer( ClusterMetadataManifest clusterMetadataManifest, Map indexMetadataMap ) throws IOException { - BlobMetadata blobMetadata = new PlainBlobMetadata("manifestFileName", 1); + mockBlobContainer(blobContainer, clusterMetadataManifest, indexMetadataMap, ClusterMetadataManifest.CODEC_V0); + } + + private void mockBlobContainer( + BlobContainer blobContainer, + ClusterMetadataManifest clusterMetadataManifest, + Map indexMetadataMap, + int codecVersion + ) throws IOException { + String manifestFileName = codecVersion >= ClusterMetadataManifest.CODEC_V1 + ? "manifest__manifestFileName__abcd__abcd__abcd__1" + : "manifestFileName"; + BlobMetadata blobMetadata = new PlainBlobMetadata(manifestFileName, 1); when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) .thenReturn(Arrays.asList(blobMetadata)); BytesReference bytes = RemoteClusterStateService.CLUSTER_METADATA_MANIFEST_FORMAT.serialize( clusterMetadataManifest, - "manifestFileName", + manifestFileName, blobStoreRepository.getCompressor(), FORMAT_PARAMS ); - when(blobContainer.readBlob("manifestFileName")).thenReturn(new ByteArrayInputStream(bytes.streamInput().readAllBytes())); + when(blobContainer.readBlob(manifestFileName)).thenReturn(new ByteArrayInputStream(bytes.streamInput().readAllBytes())); clusterMetadataManifest.getIndices().forEach(uploadedIndexMetadata -> { try { @@ -1200,15 +1261,15 @@ private void mockBlobContainer( return; } String fileName = uploadedIndexMetadata.getUploadedFilename(); - BytesReference bytesIndexMetadata = RemoteClusterStateService.INDEX_METADATA_FORMAT.serialize( - indexMetadata, - fileName, - blobStoreRepository.getCompressor(), - FORMAT_PARAMS - ); - when(blobContainer.readBlob(fileName + ".dat")).thenReturn( - new ByteArrayInputStream(bytesIndexMetadata.streamInput().readAllBytes()) - ); + when(blobContainer.readBlob(fileName + ".dat")).thenAnswer((invocationOnMock) -> { + BytesReference bytesIndexMetadata = RemoteClusterStateService.INDEX_METADATA_FORMAT.serialize( + indexMetadata, + fileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytesIndexMetadata.streamInput().readAllBytes()); + }); } catch (IOException e) { throw new RuntimeException(e); } @@ -1238,15 +1299,17 @@ private void mockBlobContainerForGlobalMetadata( ); when(blobContainer.readBlob(mockManifestFileName)).thenReturn(new ByteArrayInputStream(bytes.streamInput().readAllBytes())); - BytesReference bytesGlobalMetadata = RemoteClusterStateService.GLOBAL_METADATA_FORMAT.serialize( - metadata, - "global-metadata-file", - blobStoreRepository.getCompressor(), - FORMAT_PARAMS - ); String[] splitPath = clusterMetadataManifest.getGlobalMetadataFileName().split("/"); - when(blobContainer.readBlob(RemoteClusterStateService.GLOBAL_METADATA_FORMAT.blobName(splitPath[splitPath.length - 
1]))).thenReturn( - new ByteArrayInputStream(bytesGlobalMetadata.streamInput().readAllBytes()) + when(blobContainer.readBlob(RemoteClusterStateService.GLOBAL_METADATA_FORMAT.blobName(splitPath[splitPath.length - 1]))).thenAnswer( + (invocationOnMock) -> { + BytesReference bytesGlobalMetadata = RemoteClusterStateService.GLOBAL_METADATA_FORMAT.serialize( + metadata, + "global-metadata-file", + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytesGlobalMetadata.streamInput().readAllBytes()); + } ); } From e618b9bd27af47c4b1a04f4eaabc323a39573c40 Mon Sep 17 00:00:00 2001 From: Dhwanil Patel Date: Sun, 22 Oct 2023 13:28:50 +0530 Subject: [PATCH 066/178] Fix custom metadata not getting stored for remote store not supporting async write (#10812) Signed-off-by: Dhwanil Patel --- .../RemoteStoreClusterStateRestoreIT.java | 28 +++++++++++-------- .../remote/RemoteClusterStateService.java | 8 +++++- .../blobstore/ChecksumBlobStoreFormat.java | 25 +++++++++++++++-- 3 files changed, 47 insertions(+), 14 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index c2cb7cc60f152..f0863966fa222 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.nio.file.Files; +import java.nio.file.Path; import java.util.Arrays; import java.util.List; import java.util.Map; @@ -233,8 +234,7 @@ public void testFullClusterRestoreGlobalMetadata() throws Exception { String prevClusterUUID = clusterService().state().metadata().clusterUUID(); // Create global metadata - register a custom repo - // TODO - uncomment after all customs is also uploaded for all repos - https://github.com/opensearch-project/OpenSearch/issues/10691 - // registerCustomRepository(); + Path repoPath = registerCustomRepository(); // Create global metadata - persistent settings updatePersistentSettings(Settings.builder().put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), 34).build()); @@ -263,30 +263,36 @@ public void testFullClusterRestoreGlobalMetadata() throws Exception { verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, false); // validate global metadata restored - verifyRestoredRepositories(); + verifyRestoredRepositories(repoPath); verifyRestoredIndexTemplate(); } - private void registerCustomRepository() { + private Path registerCustomRepository() { + Path path = randomRepoPath(); assertAcked( client().admin() .cluster() .preparePutRepository("custom-repo") .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", false)) + .setSettings(Settings.builder().put("location", path).put("compress", false)) .get() ); + return path; } - private void verifyRestoredRepositories() { + private void verifyRestoredRepositories(Path repoPath) { RepositoriesMetadata repositoriesMetadata = clusterService().state().metadata().custom(RepositoriesMetadata.TYPE); - assertEquals(2, repositoriesMetadata.repositories().size()); // includes remote store repo as well + assertEquals(3, repositoriesMetadata.repositories().size()); // includes remote store repo as well assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_NAME).settings())); 
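// REPOSITORY_NAME and REPOSITORY_2_NAME here are, presumably, the remote store repositories registered through
// node attributes at startup, which is why both are expected to carry the system repository flag after restore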
assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_2_NAME).settings())); - // TODO - uncomment after all customs is also uploaded for all repos - https://github.com/opensearch-project/OpenSearch/issues/10691 - // assertEquals("fs", repositoriesMetadata.repository("custom-repo").type()); - // assertEquals(Settings.builder().put("location", randomRepoPath()).put("compress", false).build(), - // repositoriesMetadata.repository("custom-repo").settings()); + assertEquals("fs", repositoriesMetadata.repository("custom-repo").type()); + assertEquals( + Settings.builder().put("location", repoPath).put("compress", false).build(), + repositoriesMetadata.repository("custom-repo").settings() + ); + + // repo cleanup post verification + clusterAdmin().prepareDeleteRepository("custom-repo").get(); } private void addClusterLevelReadOnlyBlock() throws InterruptedException, ExecutionException { diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 13fbda77d8a2d..dfe3659c1efd1 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -568,7 +568,13 @@ private ClusterMetadataManifest uploadManifest( private void writeMetadataManifest(String clusterName, String clusterUUID, ClusterMetadataManifest uploadManifest, String fileName) throws IOException { final BlobContainer metadataManifestContainer = manifestContainer(clusterName, clusterUUID); - CLUSTER_METADATA_MANIFEST_FORMAT.write(uploadManifest, metadataManifestContainer, fileName, blobStoreRepository.getCompressor()); + CLUSTER_METADATA_MANIFEST_FORMAT.write( + uploadManifest, + metadataManifestContainer, + fileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); } private String fetchPreviousClusterUUID(String clusterName, String clusterUUID) { diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java index 17cb68f798094..e280141c12bc1 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -170,8 +170,29 @@ public T deserialize(String blobName, NamedXContentRegistry namedXContentRegistr * @param compressor whether to use compression */ public void write(final T obj, final BlobContainer blobContainer, final String name, final Compressor compressor) throws IOException { + write(obj, blobContainer, name, compressor, SNAPSHOT_ONLY_FORMAT_PARAMS); + } + + /** + * Writes blob with resolving the blob name using {@link #blobName} method. + *
+ * The blob will optionally be compressed. + * + * @param obj object to be serialized + * @param blobContainer blob container + * @param name blob name + * @param compressor whether to use compression + * @param params ToXContent params + */ + public void write( + final T obj, + final BlobContainer blobContainer, + final String name, + final Compressor compressor, + final ToXContent.Params params + ) throws IOException { final String blobName = blobName(name); - final BytesReference bytes = serialize(obj, blobName, compressor, SNAPSHOT_ONLY_FORMAT_PARAMS); + final BytesReference bytes = serialize(obj, blobName, compressor, params); blobContainer.writeBlob(blobName, bytes.streamInput(), bytes.length(), false); } @@ -195,7 +216,7 @@ public void writeAsync( final ToXContent.Params params ) throws IOException { if (blobContainer instanceof AsyncMultiStreamBlobContainer == false) { - write(obj, blobContainer, name, compressor); + write(obj, blobContainer, name, compressor, params); listener.onResponse(null); return; } From a09047a4a9870eca642577094c9793256cbe71d1 Mon Sep 17 00:00:00 2001 From: Shivansh Arora <31575408+shiv0408@users.noreply.github.com> Date: Sun, 22 Oct 2023 13:41:50 +0530 Subject: [PATCH 067/178] Removed unnecessary catch statement (#10783) * Removed unnecessary catch statement related to repo missing exception in remote state flow Signed-off-by: Shivansh Arora --- .../main/java/org/opensearch/gateway/GatewayMetaState.java | 7 ------- 1 file changed, 7 deletions(-) diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index 9eb7fb0ca04d0..5d417ce78fe5c 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -67,7 +67,6 @@ import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult; import org.opensearch.node.Node; import org.opensearch.plugins.MetadataUpgrader; -import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -712,12 +711,6 @@ assert verifyManifestAndClusterState(lastAcceptedManifest, lastAcceptedState) == assert verifyManifestAndClusterState(manifest, clusterState) == true : "Manifest and ClusterState are not in sync"; lastAcceptedManifest = manifest; lastAcceptedState = clusterState; - } catch (RepositoryMissingException e) { - // TODO This logic needs to be modified once PR for repo registration during bootstrap is pushed - // https://github.com/opensearch-project/OpenSearch/pull/9105/ - // After the above PR is pushed, we can remove this silent failure and throw the exception instead.
- logger.error("Remote repository is not yet registered"); - lastAcceptedState = clusterState; } catch (Exception e) { handleExceptionOnWrite(e); } From 9b7a9d0026aa379537804fd24b95619abe88e1c0 Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Sun, 22 Oct 2023 16:16:10 +0530 Subject: [PATCH 068/178] Override local disk state if we are able to restore from remote (#10748) * Override local disk state if we are able to restore from remote Signed-off-by: bansvaru --- .../RemoteStoreClusterStateRestoreIT.java | 68 +++++++++++++ .../opensearch/gateway/GatewayMetaState.java | 7 +- .../recovery/RemoteStoreRestoreService.java | 95 +++---------------- 3 files changed, 89 insertions(+), 81 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index f0863966fa222..29786158bc73c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -21,6 +21,7 @@ import org.opensearch.gateway.remote.ClusterMetadataManifest; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; @@ -32,6 +33,7 @@ import java.util.Objects; import java.util.concurrent.ExecutionException; +import static org.opensearch.cluster.coordination.ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_READ_ONLY_SETTING; import static org.opensearch.cluster.metadata.Metadata.CLUSTER_READ_ONLY_BLOCK; import static org.opensearch.cluster.metadata.Metadata.SETTING_READ_ONLY_SETTING; @@ -94,6 +96,72 @@ public void testFullClusterRestore() throws Exception { verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); } + /** + * This test scenario covers the case where, right after remote state restore and persisting it to disk via LucenePersistedState, the full cluster restarts. + * This is a special case for remote state as at this point the cluster uuid in the restored state is still ClusterState.UNKNOWN_UUID as we persist it to disk. + * After restart the local disk state will be read but should again be overridden with remote state. + * + * 1. Form a cluster and index a few docs + * 2. Replace all nodes to remove all local disk state + * 3. Start cluster manager node without correct seeding to ensure local disk state is written with cluster uuid ClusterState.UNKNOWN_UUID but with remote restored Metadata + * 4. Restart the cluster manager node with correct seeding. + * 5. After restart the cluster manager picks up the local disk state which has the same Metadata as remote but the cluster uuid is still ClusterState.UNKNOWN_UUID + * 6. The cluster manager will try to restore from remote again. + * 7. Metadata loaded from local disk state will be overridden with remote Metadata and no conflict should arise. + * 8. Add data nodes to recover index data + * 9. Verify Metadata and index data is restored.
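+ *
+ * In short: the restore path must tolerate non-empty local disk state whose cluster uuid is still
+ * ClusterState.UNKNOWN_UUID and override it with the remote Metadata without raising a conflict.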
+ */ + public void testFullClusterStateRestore() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // index some data to generate files in remote directory + Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + + // stop all nodes + internalCluster().stopAllNodes(); + + // start a cluster manager node with no cluster manager seeding. + // This should fail with IllegalStateException as cluster manager fails to form without any initial seed + assertThrows( + IllegalStateException.class, + () -> internalCluster().startClusterManagerOnlyNodes( + clusterManagerNodeCount, + Settings.builder() + .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey()) // disable seeding during bootstrapping + .build() + ) + ); + + // verify cluster manager not elected + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert Objects.equals(newClusterUUID, ClusterState.UNKNOWN_UUID) + : "Disabling Cluster manager seeding failed. cluster uuid is not unknown"; + + // restart cluster manager with correct seed + internalCluster().fullRestart(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + return Settings.builder() + .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), nodeName) // Seed with correct Cluster Manager node + .build(); + } + }); + + // validate new cluster state formed + newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, ClusterState.UNKNOWN_UUID) : "cluster restart not successful. cluster uuid is still unknown"; + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + validateMetadata(List.of(INDEX_NAME)); + + // start data nodes to trigger index data recovery + internalCluster().startDataOnlyNodes(dataNodeCount); + verifyRestoredData(indexStats, INDEX_NAME); + } + public void testFullClusterRestoreMultipleIndices() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index 5d417ce78fe5c..f855449c708d2 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -174,7 +174,9 @@ public void start( if (ClusterState.UNKNOWN_UUID.equals(lastKnownClusterUUID) == false) { // Load state from remote final RemoteRestoreResult remoteRestoreResult = remoteStoreRestoreService.restore( - clusterState, + // Remote Metadata should always override local disk Metadata + // if local disk Metadata's cluster uuid is UNKNOWN_UUID + ClusterState.builder(clusterState).metadata(Metadata.EMPTY_METADATA).build(), lastKnownClusterUUID, false, new String[] {} @@ -549,6 +551,9 @@ static class LucenePersistedState implements PersistedState { // out by this version of OpenSearch. TODO TBD should we avoid indexing when possible? final PersistedClusterStateService.Writer writer = persistedClusterStateService.createWriter(); try { + // During remote state restore, there will be non-empty metadata getting persisted with cluster UUID as + // ClusterState.UNKNOWN_UUID.
The valid UUID will be generated and persisted along with the first cluster state getting + // published. writer.writeFullStateAndCommit(currentTerm, lastAcceptedState); } catch (Exception e) { try { diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java index 6692d521b8f65..9541d13421e27 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -40,12 +40,10 @@ import java.util.ArrayList; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Optional; -import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; @@ -146,6 +144,11 @@ public RemoteRestoreResult restore( || restoreClusterUUID.isBlank()) == false; if (metadataFromRemoteStore) { try { + // Restore with current cluster UUID will fail as same indices would be present in the cluster which we are trying to + // restore + if (currentState.metadata().clusterUUID().equals(restoreClusterUUID)) { + throw new IllegalArgumentException("clusterUUID to restore from should be different from current cluster UUID"); + } remoteMetadata = remoteClusterStateService.getLatestMetadata(currentState.getClusterName().value(), restoreClusterUUID); remoteMetadata.getIndices().values().forEach(indexMetadata -> { indexMetadataMap.put(indexMetadata.getIndex().getName(), new Tuple<>(true, indexMetadata)); @@ -158,12 +161,21 @@ public RemoteRestoreResult restore( IndexMetadata indexMetadata = currentState.metadata().index(indexName); if (indexMetadata == null) { logger.warn("Index restore is not supported for non-existent index. Skipping: {}", indexName); + } else if (indexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false) == false) { + logger.warn("Remote store is not enabled for index: {}", indexName); + } else if (restoreAllShards && IndexMetadata.State.CLOSE.equals(indexMetadata.getState()) == false) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "cannot restore index [%s] because an open index with same name/uuid already exists in the cluster.", + indexName + ) + " Close the existing index." + ); } else { indexMetadataMap.put(indexName, new Tuple<>(false, indexMetadata)); } } } - validate(currentState, indexMetadataMap, restoreClusterUUID, restoreAllShards); return executeRestore(currentState, indexMetadataMap, restoreAllShards, remoteMetadata); } @@ -272,83 +284,6 @@ private void restoreGlobalMetadata(Metadata.Builder mdBuilder, Metadata remoteMe repositoriesMetadata.ifPresent(metadata -> mdBuilder.putCustom(RepositoriesMetadata.TYPE, metadata)); } - /** - * Performs various validations needed before executing restore - * @param currentState current cluster state - * @param indexMetadataMap map of index metadata to restore - * @param restoreClusterUUID cluster UUID used to restore IndexMetadata - * @param restoreAllShards indicates if all shards of the index needs to be restored. 
This flat is ignored if remoteClusterUUID is provided - */ - private void validate( - ClusterState currentState, - Map> indexMetadataMap, - @Nullable String restoreClusterUUID, - boolean restoreAllShards - ) throws IllegalStateException, IllegalArgumentException { - String errorMsg = "cannot restore index [%s] because an open index with same name/uuid already exists in the cluster."; - - // Restore with current cluster UUID will fail as same indices would be present in the cluster which we are trying to - // restore - if (currentState.metadata().clusterUUID().equals(restoreClusterUUID)) { - throw new IllegalArgumentException("clusterUUID to restore from should be different from current cluster UUID"); - } - for (Map.Entry> indexMetadataEntry : indexMetadataMap.entrySet()) { - String indexName = indexMetadataEntry.getKey(); - IndexMetadata indexMetadata = indexMetadataEntry.getValue().v2(); - String indexUUID = indexMetadata.getIndexUUID(); - boolean metadataFromRemoteStore = indexMetadataEntry.getValue().v1(); - if (indexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false)) { - if (metadataFromRemoteStore) { - Set graveyardIndexNames = new HashSet<>(); - Set graveyardIndexUUID = new HashSet<>(); - Set liveClusterIndexUUIDs = currentState.metadata() - .indices() - .values() - .stream() - .map(IndexMetadata::getIndexUUID) - .collect(Collectors.toSet()); - - currentState.metadata().indexGraveyard().getTombstones().forEach(tombstone -> { - graveyardIndexNames.add(tombstone.getIndex().getName()); - graveyardIndexUUID.add(tombstone.getIndex().getUUID()); - }); - - // Since updates to graveyard are synced to remote we should neven land in a situation where remote contain index - // metadata for graveyard index. - assert graveyardIndexNames.contains(indexName) == false : String.format( - Locale.ROOT, - "Index name [%s] exists in graveyard!", - indexName - ); - assert graveyardIndexUUID.contains(indexUUID) == false : String.format( - Locale.ROOT, - "Index UUID [%s] exists in graveyard!", - indexUUID - ); - - // Any indices being restored from remote cluster state should not already be part of the cluster as this causes - // conflict - boolean sameNameIndexExists = currentState.metadata().hasIndex(indexName); - boolean sameUUIDIndexExists = liveClusterIndexUUIDs.contains(indexUUID); - if (sameNameIndexExists || sameUUIDIndexExists) { - String finalErrorMsg = String.format(Locale.ROOT, errorMsg, indexName); - logger.info(finalErrorMsg); - throw new IllegalStateException(finalErrorMsg); - } - - boolean isHidden = IndexMetadata.INDEX_HIDDEN_SETTING.get(indexMetadata.getSettings()); - createIndexService.validateIndexName(indexName, currentState); - createIndexService.validateDotIndex(indexName, isHidden); - shardLimitValidator.validateShardLimit(indexName, indexMetadata.getSettings(), currentState); - } else if (restoreAllShards && IndexMetadata.State.CLOSE.equals(indexMetadata.getState()) == false) { - throw new IllegalStateException(String.format(Locale.ROOT, errorMsg, indexName) + " Close the existing index."); - } - } else { - logger.warn("Remote store is not enabled for index: {}", indexName); - } - } - } - /** * Result of a remote restore operation. 
*/ From 5e7c0689534f94b5f09f85eb0a64b42f79899241 Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Mon, 23 Oct 2023 11:24:13 +0530 Subject: [PATCH 069/178] fix integ test: check for red indices and then manually restore to green (#10838) Signed-off-by: bansvaru --- .../remotestore/RemoteStoreClusterStateRestoreIT.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index 29786158bc73c..c429b587a1399 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -111,7 +111,7 @@ public void testFullClusterRestore() throws Exception { * 8. Add data nodes to recover index data * 9. Verify Metadata and index data is restored. */ - public void testFullClusterStateRestore() throws Exception { + public void testFullClusterRestoreDoesntFailWithConflictingLocalState() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; int dataNodeCount = shardCount * (replicaCount + 1); @@ -159,7 +159,7 @@ public Settings onNodeStopped(String nodeName) { // start data nodes to trigger index data recovery internalCluster().startDataOnlyNodes(dataNodeCount); - verifyRestoredData(indexStats, INDEX_NAME); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); } public void testFullClusterRestoreMultipleIndices() throws Exception { From ef4b327ab48a6a674b657049e440418fe427cb0c Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Sun, 22 Oct 2023 22:54:43 -0700 Subject: [PATCH 070/178] Update version check made in PR #10656. 
(#10835) Signed-off-by: Rishikesh1159 --- .../action/admin/cluster/node/stats/NodeStats.java | 7 +++---- .../opensearch/index/SegmentReplicationRejectionStats.java | 6 ++---- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index 3d37056956c69..874713b51d627 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -215,8 +215,7 @@ public NodeStats(StreamInput in) throws IOException { } else { resourceUsageStats = null; } - // TODO: change to V_2_12_0 on main after backport to 2.x - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { segmentReplicationRejectionStats = in.readOptionalWriteable(SegmentReplicationRejectionStats::new); } else { segmentReplicationRejectionStats = null; @@ -431,6 +430,7 @@ public SegmentReplicationRejectionStats getSegmentReplicationRejectionStats() { return segmentReplicationRejectionStats; } + @Nullable public RepositoriesStats getRepositoriesStats() { return repositoriesStats; } @@ -481,8 +481,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(resourceUsageStats); } - // TODO: change to V_2_12_0 on main after backport to 2.x - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(segmentReplicationRejectionStats); } if (out.getVersion().onOrAfter(Version.V_2_12_0)) { diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java b/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java index 9f9f150ebe2d7..492f253bbcb7c 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java @@ -34,8 +34,7 @@ public SegmentReplicationRejectionStats(final long totalRejectionCount) { } public SegmentReplicationRejectionStats(StreamInput in) throws IOException { - // TODO: change to V_2_12_0 on main after backport to 2.x - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { this.totalRejectionCount = in.readVLong(); } } @@ -53,8 +52,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - // TODO: change to V_2_12_0 on main after backport to 2.x - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeVLong(totalRejectionCount); } } From b5ef0788699152fb2b1ae158b518c212ba784184 Mon Sep 17 00:00:00 2001 From: Dhwanil Patel Date: Mon, 23 Oct 2023 13:56:33 +0530 Subject: [PATCH 071/178] Fix remote cluster restore for data stream (#10777) * Fix remote cluster restore for data stream, also added integ test Signed-off-by: Dhwanil Patel --- .../datastream/DataStreamTestCase.java | 3 ++- .../RemoteStoreClusterStateRestoreIT.java | 9 +++++++ .../remote/RemoteClusterStateService.java | 5 +++- .../RemoteClusterStateServiceTests.java | 6 ++--- .../opensearch/test/InternalTestCluster.java | 12 ++++++++++ .../test/OpenSearchIntegTestCase.java | 24 +++++++++++++++++++ 6 files changed, 54 
insertions(+), 5 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java index 50ff76c6b62f3..82ab5b0118c0e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java @@ -37,6 +37,7 @@ public AcknowledgedResponse createDataStream(String name) throws Exception { CreateDataStreamAction.Request request = new CreateDataStreamAction.Request(name); AcknowledgedResponse response = client().admin().indices().createDataStream(request).get(); assertThat(response.isAcknowledged(), is(true)); + performRemoteStoreTestAction(); return response; } @@ -67,6 +68,7 @@ public RolloverResponse rolloverDataStream(String name) throws Exception { RolloverResponse response = client().admin().indices().rolloverIndex(request).get(); assertThat(response.isAcknowledged(), is(true)); assertThat(response.isRolledOver(), is(true)); + performRemoteStoreTestAction(); return response; } @@ -109,5 +111,4 @@ public AcknowledgedResponse deleteIndexTemplate(String name) throws Exception { assertThat(response.isAcknowledged(), is(true)); return response; } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index c429b587a1399..e9afd6d36bb87 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -9,6 +9,7 @@ package org.opensearch.remotestore; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.indices.datastream.DataStreamRolloverIT; import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.opensearch.cluster.ClusterState; @@ -291,6 +292,14 @@ private void validateCurrentMetadata() throws Exception { }); } + public void testDataStreamPostRemoteStateRestore() throws Exception { + new DataStreamRolloverIT() { + protected boolean triggerRemoteStateRestore() { + return true; + } + }.testDataStreamRollover(); + } + public void testFullClusterRestoreGlobalMetadata() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index dfe3659c1efd1..b505b3e2d4c0c 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -732,7 +732,10 @@ public Metadata getLatestMetadata(String clusterName, String clusterUUID) { // Fetch Index Metadata Map indices = getIndexMetadataMap(clusterName, clusterUUID, clusterMetadataManifest.get()); - return Metadata.builder(globalMetadata).indices(indices).build(); + Map indexMetadataMap = new HashMap<>(); + indices.values().forEach(indexMetadata -> { indexMetadataMap.put(indexMetadata.getIndex().getName(), indexMetadata); }); 
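+ // Re-key the remote-fetched map by index name: the map read back from remote is keyed by index UUID, while
+ // Metadata.Builder#indices (and consumers such as data stream backing-index resolution, per this fix) expect
+ // name-keyed entries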
+ + return Metadata.builder(globalMetadata).indices(indexMetadataMap).build(); } private Metadata getGlobalMetadata(String clusterName, String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) { diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 55094400581b4..827735f6bcc0b 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -826,9 +826,9 @@ public void testReadLatestIndexMetadataSuccess() throws IOException { ).getIndices(); assertEquals(indexMetadataMap.size(), 1); - assertEquals(indexMetadataMap.get(index.getUUID()).getIndex().getName(), index.getName()); - assertEquals(indexMetadataMap.get(index.getUUID()).getNumberOfShards(), indexMetadata.getNumberOfShards()); - assertEquals(indexMetadataMap.get(index.getUUID()).getNumberOfReplicas(), indexMetadata.getNumberOfReplicas()); + assertEquals(indexMetadataMap.get(index.getName()).getIndex().getName(), index.getName()); + assertEquals(indexMetadataMap.get(index.getName()).getNumberOfShards(), indexMetadata.getNumberOfShards()); + assertEquals(indexMetadataMap.get(index.getName()).getNumberOfReplicas(), indexMetadata.getNumberOfReplicas()); } public void testMarkLastStateAsCommittedSuccess() throws IOException { diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 63d8f069bebea..952cd6c085966 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -1871,6 +1871,18 @@ public void stopAllNodes() { } } + /** + * Replace all nodes by stopping all current nodes and starting new nodes. + * Used for remote store test cases, where remote state is restored. + */ + public void resetCluster() { + int totalClusterManagerNodes = numClusterManagerNodes(); + int totalDataNodes = numDataNodes(); + stopAllNodes(); + startClusterManagerOnlyNodes(totalClusterManagerNodes); + startDataOnlyNodes(totalDataNodes); + } + private synchronized void startAndPublishNodesAndClients(List nodeAndClients) { if (nodeAndClients.size() > 0) { final int newClusterManagers = (int) nodeAndClients.stream() diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index c16cc1d2a5fba..ad27d9834f159 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -789,6 +789,30 @@ protected Settings featureFlagSettings() { return featureSettings.build(); } + /** + * Represents whether it needs to trigger remote state restore or not. + * For tests with a remote store enabled domain, it will be overridden to true. + * + * @return whether remote state restore needs to be performed or not + */ + protected boolean triggerRemoteStateRestore() { + return false; + } + + /** + * For tests with remote cluster state, it will reset the cluster and the cluster state will be + * restored from remote.
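+ *
+ * This hook is a no-op unless {@link #triggerRemoteStateRestore()} returns true; a test opts in the way
+ * testDataStreamPostRemoteStateRestore above does, roughly:
+ * new DataStreamRolloverIT() { protected boolean triggerRemoteStateRestore() { return true; } }.testDataStreamRollover();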
+ */ + protected void performRemoteStoreTestAction() { + if (triggerRemoteStateRestore()) { + String clusterUUIDBefore = clusterService().state().metadata().clusterUUID(); + internalCluster().resetCluster(); + String clusterUUIDAfter = clusterService().state().metadata().clusterUUID(); + // assertion that UUID is changed post restore. + assertFalse(clusterUUIDBefore.equals(clusterUUIDAfter)); + } + } + /** * Creates one or more indices and asserts that the indices are acknowledged. If one of the indices * already exists this method will fail and wipe all the indices created so far. From 7453daa2b7ce698fa7bf3647002eb12394a5dae1 Mon Sep 17 00:00:00 2001 From: Ashish Date: Mon, 23 Oct 2023 14:10:14 +0530 Subject: [PATCH 072/178] [Remote Store] Sync segments in refresh listener on refresh after commit (#10830) * [Remote Store] Sync segments in refresh listener on refresh after commit Signed-off-by: Ashish Singh * Add Integration Tests Signed-off-by: Ashish Singh * Add comments and java doc Signed-off-by: Ashish Singh --------- Signed-off-by: Ashish Singh --- .../remotestore/RemoteStoreStatsIT.java | 76 ++++++++++++++++- .../remote/RemoteTranslogTransferTracker.java | 57 +++++++++++++ .../opensearch/index/shard/IndexShard.java | 8 ++ .../shard/RemoteStoreRefreshListener.java | 84 +++++++++++++++---- .../index/translog/RemoteFsTranslog.java | 8 +- 5 files changed, 215 insertions(+), 18 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 8ae25c6758195..5e91176ed0473 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -15,6 +15,8 @@ import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.FollowersChecker; +import org.opensearch.cluster.coordination.LeaderChecker; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; @@ -23,15 +25,20 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.Before; +import org.opensearch.test.disruption.NetworkDisruption; +import org.opensearch.test.transport.MockTransportService; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Locale; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -44,12 +51,17 @@ public class RemoteStoreStatsIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "remote-store-test-idx-1"; - @Before + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + public void setup() { internalCluster().startNodes(3); } public void testStatsResponseFromAllNodes() { + setup(); // Step 1 - We create cluster, create an 
index, and then index documents into. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. @@ -118,6 +130,7 @@ public void testStatsResponseFromAllNodes() { } public void testStatsResponseAllShards() { + setup(); // Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. @@ -175,6 +188,7 @@ public void testStatsResponseAllShards() { } public void testStatsResponseFromLocalNode() { + setup(); // Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. @@ -236,6 +250,7 @@ public void testStatsResponseFromLocalNode() { } public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exception { + setup(); // Scenario: // - Create index with single primary and single replica shard // - Disable Refresh Interval for the index @@ -325,6 +340,7 @@ public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exce } public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() throws Exception { + setup(); // Scenario: // - Create index with single primary and N-1 replica shards (N = no of data nodes) // - Disable Refresh Interval for the index @@ -416,6 +432,7 @@ public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() thr } public void testStatsOnShardRelocation() { + setup(); // Scenario: // - Create index with single primary and single replica shard // - Index documents @@ -471,6 +488,7 @@ public void testStatsOnShardRelocation() { } public void testStatsOnShardUnassigned() throws IOException { + setup(); // Scenario: // - Create index with single primary and two replica shard // - Index documents @@ -497,6 +515,7 @@ public void testStatsOnShardUnassigned() throws IOException { } public void testStatsOnRemoteStoreRestore() throws IOException { + setup(); // Creating an index with primary shard count == total nodes in cluster and 0 replicas int dataNodeCount = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes(); createIndex(INDEX_NAME, remoteStoreIndexSettings(0, dataNodeCount)); @@ -544,6 +563,7 @@ public void testStatsOnRemoteStoreRestore() throws IOException { } public void testNonZeroPrimaryStatsOnNewlyCreatedIndexWithZeroDocs() throws Exception { + setup(); // Create an index with one primary and one replica shard createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 1)); ensureGreen(INDEX_NAME); @@ -581,6 +601,58 @@ public void testNonZeroPrimaryStatsOnNewlyCreatedIndexWithZeroDocs() throws Exce }, 5, TimeUnit.SECONDS); } + public void testStatsCorrectnessOnFailover() { + Settings clusterSettings = Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "100ms") + .put(LeaderChecker.LEADER_CHECK_INTERVAL_SETTING.getKey(), "500ms") + .put(LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "100ms") + .put(FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), "500ms") + .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .put(nodeSettings(0)) + .build(); + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(clusterSettings); + internalCluster().startDataOnlyNodes(2, clusterSettings); + + // Create an index with one primary and one replica shard + 
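// (the lone replica is allocated to the other data node, so it can take over as primary once the original primary is isolated) +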
createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 1)); + ensureGreen(INDEX_NAME); + + // Index some docs and refresh + indexDocs(); + refresh(INDEX_NAME); + + String primaryNode = primaryNodeName(INDEX_NAME); + String replicaNode = replicaNodeName(INDEX_NAME); + + // Start network disruption - primary node will be isolated + Set nodesInOneSide = Stream.of(clusterManagerNode, replicaNode).collect(Collectors.toCollection(HashSet::new)); + Set nodesInOtherSide = Stream.of(primaryNode).collect(Collectors.toCollection(HashSet::new)); + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInOtherSide), + NetworkDisruption.DISCONNECT + ); + internalCluster().setDisruptionScheme(networkDisruption); + logger.info("--> network disruption is started"); + networkDisruption.startDisrupting(); + ensureStableCluster(2, clusterManagerNode); + + RemoteStoreStatsResponse response = client(clusterManagerNode).admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); + final String indexShardId = String.format(Locale.ROOT, "[%s][%s]", INDEX_NAME, "0"); + List matches = Arrays.stream(response.getRemoteStoreStats()) + .filter(stat -> indexShardId.equals(stat.getSegmentStats().shardId.toString())) + .collect(Collectors.toList()); + assertEquals(1, matches.size()); + RemoteSegmentTransferTracker.Stats segmentStats = matches.get(0).getSegmentStats(); + assertEquals(0, segmentStats.refreshTimeLagMs); + + networkDisruption.stopDisrupting(); + internalCluster().clearDisruptionScheme(); + ensureStableCluster(3, clusterManagerNode); + ensureGreen(INDEX_NAME); + logger.info("Test completed"); + } + private void indexDocs() { for (int i = 0; i < randomIntBetween(5, 10); i++) { if (randomBoolean()) { diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java index 1a9896540212e..4214a87049350 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java @@ -232,6 +232,63 @@ public RemoteTranslogTransferTracker.Stats stats() { ); } + @Override + public String toString() { + return "RemoteTranslogTransferStats{" + + "lastSuccessfulUploadTimestamp=" + + lastSuccessfulUploadTimestamp.get() + + "," + + "totalUploadsStarted=" + + totalUploadsStarted.get() + + "," + + "totalUploadsSucceeded=" + + totalUploadsSucceeded.get() + + "," + + "totalUploadsFailed=" + + totalUploadsFailed.get() + + "," + + "uploadBytesStarted=" + + uploadBytesStarted.get() + + "," + + "uploadBytesFailed=" + + uploadBytesFailed.get() + + "," + + "totalUploadTimeInMillis=" + + totalUploadTimeInMillis.get() + + "," + + "uploadBytesMovingAverage=" + + uploadBytesMovingAverageReference.get().getAverage() + + "," + + "uploadBytesPerSecMovingAverage=" + + uploadBytesPerSecMovingAverageReference.get().getAverage() + + "," + + "uploadTimeMovingAverage=" + + uploadTimeMsMovingAverageReference.get().getAverage() + + "," + + "lastSuccessfulDownloadTimestamp=" + + lastSuccessfulDownloadTimestamp.get() + + "," + + "totalDownloadsSucceeded=" + + totalDownloadsSucceeded.get() + + "," + + "downloadBytesSucceeded=" + + downloadBytesSucceeded.get() + + "," + + "totalDownloadTimeInMillis=" + + totalDownloadTimeInMillis.get() + + "," + + "downloadBytesMovingAverage=" + + downloadBytesMovingAverageReference.get().getAverage() + + "," + + 
"downloadBytesPerSecMovingAverage=" + + downloadBytesPerSecMovingAverageReference.get().getAverage() + + "," + + "downloadTimeMovingAverage=" + + downloadTimeMsMovingAverageReference.get().getAverage() + + "," + + "}"; + } + /** * Represents the tracker's state as seen in the stats API. * diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index f990a3b56e856..fb4e9056153aa 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -4774,6 +4774,8 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal) throws IOE * @throws IOException if exception occurs while reading segments from remote store. */ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, final Runnable onFileSync) throws IOException { + boolean syncSegmentSuccess = false; + long startTimeMs = System.currentTimeMillis(); assert indexSettings.isRemoteStoreEnabled(); logger.trace("Downloading segments from remote segment store"); RemoteSegmentStoreDirectory remoteDirectory = getRemoteDirectory(); @@ -4823,9 +4825,15 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, final Runn : "There should not be any segments file in the dir"; store.commitSegmentInfos(infosSnapshot, processedLocalCheckpoint, processedLocalCheckpoint); } + syncSegmentSuccess = true; } catch (IOException e) { throw new IndexShardRecoveryException(shardId, "Exception while copying segment files from remote segment store", e); } finally { + logger.trace( + "syncSegmentsFromRemoteSegmentStore success={} elapsedTime={}", + syncSegmentSuccess, + (System.currentTimeMillis() - startTimeMs) + ); store.decRef(); remoteStore.decRef(); } diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index c650edc31da8d..3e97b07abfb5d 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -123,14 +123,13 @@ public void beforeRefresh() throws IOException {} @Override protected void runAfterRefreshExactlyOnce(boolean didRefresh) { - if (shouldSync(didRefresh)) { + // We have 2 separate methods to check if sync needs to be done or not. This is required since we use the return boolean + // from isReadyForUpload to schedule refresh retries as the index shard or the primary mode are not in complete + // ready state. 
+ if (shouldSync(didRefresh) && isReadyForUpload()) { segmentTracker.updateLocalRefreshTimeAndSeqNo(); try { - if (this.primaryTerm != indexShard.getOperationPrimaryTerm()) { - logger.debug("primaryTerm update from={} to={}", primaryTerm, indexShard.getOperationPrimaryTerm()); - this.primaryTerm = indexShard.getOperationPrimaryTerm(); - this.remoteDirectory.init(); - } + initializeRemoteDirectoryOnTermUpdate(); try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { Collection<String> localSegmentsPostRefresh = segmentInfosGatedCloseable.get().files(true); updateLocalSizeMapAndTracker(localSegmentsPostRefresh); @@ -160,20 +159,20 @@ protected boolean performAfterRefreshWithPermit(boolean didRefresh) { } private boolean shouldSync(boolean didRefresh) { - // The third condition exists for uploading the zero state segments where the refresh has not changed the reader reference, but it - // is important to upload the zero state segments so that the restore does not break. return this.primaryTerm != indexShard.getOperationPrimaryTerm() + // If the readers change, didRefresh is always true. || didRefresh - || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty(); + // The third condition exists for uploading the zero state segments where the refresh has not changed the reader + // reference, but it is important to upload the zero state segments so that the restore does not break. || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty() + // When shouldSync is called the first time, the 1st condition on the primary term is true. But after that we update + // the primary term, so the same condition would not evaluate to true again in syncSegments. + // The check below ensures that if there is a commit, it gets picked up by both the 1st and the 2nd shouldSync call. + || isRefreshAfterCommitSafe(); } private boolean syncSegments() { - if (indexShard.getReplicationTracker().isPrimaryMode() == false || indexShard.state() == IndexShardState.CLOSED) { - logger.debug( - "Skipped syncing segments with primaryMode={} indexShardState={}", - indexShard.getReplicationTracker().isPrimaryMode(), - indexShard.state() - ); + if (isReadyForUpload() == false) { // Following check is required to enable retry and make sure that we do not lose this refresh event // When primary shard is restored from remote store, the recovery happens first followed by changing // primaryMode to true. Due to this, the refresh that is triggered post replay of translog will not go through @@ -323,6 +322,19 @@ private boolean isRefreshAfterCommit() throws IOException { && !remoteDirectory.containsFile(lastCommittedLocalSegmentFileName, getChecksumOfLocalFile(lastCommittedLocalSegmentFileName))); } + /** + * Returns whether the current refresh has happened after a commit. + * @return true if this refresh has happened on account of a commit; false otherwise or on exception.
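+ * Note that swallowing the exception is deliberate: if reading the last committed segments file fails, the refresh is + * simply not treated as a refresh-after-commit and the remaining shouldSync conditions decide whether to upload.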
+ */ + private boolean isRefreshAfterCommitSafe() { + try { + return isRefreshAfterCommit(); + } catch (Exception e) { + logger.info("Exception occurred in isRefreshAfterCommitSafe", e); + } + return false; + } + void uploadMetadata(Collection<String> localSegmentsPostRefresh, SegmentInfos segmentInfos, ReplicationCheckpoint replicationCheckpoint) throws IOException { final long maxSeqNo = ((InternalEngine) indexShard.getEngine()).currentOngoingRefreshCheckpoint(); @@ -439,6 +451,48 @@ private void updateFinalStatusInSegmentTracker(boolean uploadStatus, long bytesB } } + /** + * On primary term update, we (re)initialise the remote segment directory to reflect the latest metadata file that + * has been uploaded to remote store successfully. This method also updates the segment tracker with the latest + * segment files uploaded to remote store. + */ + private void initializeRemoteDirectoryOnTermUpdate() throws IOException { + if (this.primaryTerm != indexShard.getOperationPrimaryTerm()) { + logger.trace("primaryTerm update from={} to={}", primaryTerm, indexShard.getOperationPrimaryTerm()); + this.primaryTerm = indexShard.getOperationPrimaryTerm(); + RemoteSegmentMetadata uploadedMetadata = this.remoteDirectory.init(); + + // During failover, the uploaded metadata would have names of files that have been uploaded to remote store. + // Here we update the tracker with latest remote uploaded files. + if (uploadedMetadata != null) { + segmentTracker.setLatestUploadedFiles(uploadedMetadata.getMetadata().keySet()); + } + } + } + + /** + * This checks for readiness of the index shard and primary mode. It has been separated from shouldSync since we use the + * returned value of this method for scheduling retries in the syncSegments method. + * @return true iff primaryMode is true and the index shard is not in a closed state.
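+ * When this returns false, the method logs primaryMode, the index shard state and the engine type at trace level so + * that skipped syncs can be diagnosed from the logs.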
+ */ + private boolean isReadyForUpload() { + boolean isReady = indexShard.getReplicationTracker().isPrimaryMode() && indexShard.state() != IndexShardState.CLOSED; + if (isReady == false) { + StringBuilder sb = new StringBuilder("Skipped syncing segments with"); + if (indexShard.getReplicationTracker() != null) { + sb.append(" primaryMode=").append(indexShard.getReplicationTracker().isPrimaryMode()); + } + if (indexShard.state() != null) { + sb.append(" indexShardState=").append(indexShard.state()); + } + if (indexShard.getEngineOrNull() != null) { + sb.append(" engineType=").append(indexShard.getEngine().getClass().getSimpleName()); + } + logger.trace(sb.toString()); + } + return isReady; + } + /** * Creates an {@link UploadListener} containing the stats population logic which would be triggered before and after segment upload events */ diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 2dd9b1a545d4a..db85a37b556fc 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -161,6 +161,7 @@ public static void download(Repository repository, ShardId shardId, ThreadPool t remoteTranslogTransferTracker ); RemoteFsTranslog.download(translogTransferManager, location, logger); + logger.trace(remoteTranslogTransferTracker.toString()); } static void download(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { @@ -173,15 +174,20 @@ static void download(TranslogTransferManager translogTransferManager, Path locat */ IOException ex = null; for (int i = 0; i <= DOWNLOAD_RETRIES; i++) { + boolean success = false; + long startTimeMs = System.currentTimeMillis(); try { downloadOnce(translogTransferManager, location, logger); + success = true; return; } catch (FileNotFoundException | NoSuchFileException e) { // continue till download retries ex = e; + } finally { + logger.trace("downloadOnce success={} timeElapsed={}", success, (System.currentTimeMillis() - startTimeMs)); } } - logger.debug("Exhausted all download retries during translog/checkpoint file download"); + logger.info("Exhausted all download retries during translog/checkpoint file download"); throw ex; } From 218a2ef7c6f670f99cc20d22f8866d7597718640 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Mon, 23 Oct 2023 14:46:26 +0530 Subject: [PATCH 073/178] [Remote Store] Add extra buffer before deleting older generations of translog (#10817) --------- Signed-off-by: Gaurav Bafna --- .../common/settings/IndexScopedSettings.java | 1 + .../org/opensearch/index/IndexSettings.java | 19 ++ .../index/translog/RemoteFsTranslog.java | 2 +- .../index/translog/RemoteFsTranslogTests.java | 181 +++++++++++++----- 4 files changed, 154 insertions(+), 49 deletions(-) diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 83bf8c82ee3dd..62e8faf33e1fa 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -221,6 +221,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { // Settings for remote translog IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + 
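// Retains extra translog generations in the remote store beyond the minimum referenced generation as a safety + // buffer before deletion (dynamic, defaults to 100 generations) +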
IndexSettings.INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING, // Settings for remote store enablement IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 99d2b5a74c406..00e765d73f77f 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -668,6 +668,14 @@ public static IndexMergePolicy fromString(String text) { Property.IndexScope ); + public static final Setting<Integer> INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING = Setting.intSetting( + "index.remote_store.translog.keep_extra_gen", + 100, + 0, + Property.Dynamic, + Property.IndexScope + ); + private final Index index; private final Version version; private final Logger logger; @@ -680,6 +688,7 @@ public static IndexMergePolicy fromString(String text) { private final String remoteStoreTranslogRepository; private final String remoteStoreRepository; private final boolean isRemoteSnapshot; + private int remoteTranslogKeepExtraGen; private Version extendedCompatibilitySnapshotVersion; // volatile fields are updated via #updateIndexMetadata(IndexMetadata) under lock @@ -850,6 +859,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti remoteStoreTranslogRepository = settings.get(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY); remoteTranslogUploadBufferInterval = INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY); + this.remoteTranslogKeepExtraGen = INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.get(settings); isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); if (isRemoteSnapshot && FeatureFlags.isEnabled(SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY)) { @@ -1021,6 +1031,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setRemoteTranslogUploadBufferInterval ); + scopedSettings.addSettingsUpdateConsumer(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING, this::setRemoteTranslogKeepExtraGen); } private void setSearchIdleAfter(TimeValue searchIdleAfter) { @@ -1300,6 +1311,10 @@ public TimeValue getRemoteTranslogUploadBufferInterval() { return remoteTranslogUploadBufferInterval; } + public int getRemoteTranslogExtraKeep() { + return remoteTranslogKeepExtraGen; + } + /** * Returns true iff the remote translog buffer interval setting exists or in other words is explicitly set. */ @@ -1311,6 +1326,10 @@ public void setRemoteTranslogUploadBufferInterval(TimeValue remoteTranslogUpload this.remoteTranslogUploadBufferInterval = remoteTranslogUploadBufferInterval; } + public void setRemoteTranslogKeepExtraGen(int extraGen) { + this.remoteTranslogKeepExtraGen = extraGen; + } + /** * Returns this interval in which the shards of this index are asynchronously refreshed. {@code -1} means async refresh is disabled.
*/ diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index db85a37b556fc..a305a774f5854 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -431,7 +431,7 @@ public void trimUnreferencedReaders() throws IOException { // cleans up remote translog files not referenced in latest uploaded metadata. // This enables us to restore translog from the metadata in case of failover or relocation. Set generationsToDelete = new HashSet<>(); - for (long generation = minRemoteGenReferenced - 1; generation >= 0; generation--) { + for (long generation = minRemoteGenReferenced - 1 - indexSettings().getRemoteTranslogExtraKeep(); generation >= 0; generation--) { if (fileTransferTracker.uploaded(Translog.getFilename(generation)) == false) { break; } diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 42e0df2dc90c1..3cb65610fab58 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -97,6 +97,7 @@ import java.util.zip.CheckedInputStream; import static org.opensearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; +import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING; import static org.opensearch.index.translog.RemoteFsTranslog.TRANSLOG; import static org.opensearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder; import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; @@ -124,6 +125,8 @@ public class RemoteFsTranslogTests extends OpenSearchTestCase { private ThreadPool threadPool; private final static String METADATA_DIR = "metadata"; private final static String DATA_DIR = "data"; + + AtomicInteger writeCalls = new AtomicInteger(); BlobStoreRepository repository; BlobStoreTransferService blobStoreTransferService; @@ -163,13 +166,13 @@ public void tearDown() throws Exception { private RemoteFsTranslog create(Path path) throws IOException { final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); - return create(path, createRepository(), translogUUID); + return create(path, createRepository(), translogUUID, 0); } - private RemoteFsTranslog create(Path path, BlobStoreRepository repository, String translogUUID) throws IOException { + private RemoteFsTranslog create(Path path, BlobStoreRepository repository, String translogUUID, int extraGenToKeep) throws IOException { this.repository = repository; globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - final TranslogConfig translogConfig = getTranslogConfig(path); + final TranslogConfig translogConfig = getTranslogConfig(path, extraGenToKeep); final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); threadPool = new TestThreadPool(getClass().getName()); blobStoreTransferService = new BlobStoreTransferService(repository.blobStore(), threadPool); @@ -185,10 +188,17 @@ private RemoteFsTranslog create(Path path, BlobStoreRepository repository, Strin primaryMode::get, new RemoteTranslogTransferTracker(shardId, 10) ); + } + private RemoteFsTranslog create(Path path, 
BlobStoreRepository repository, String translogUUID) throws IOException { + return create(path, repository, translogUUID, 0); } private TranslogConfig getTranslogConfig(final Path path) { + return getTranslogConfig(path, 0); + } + + private TranslogConfig getTranslogConfig(final Path path, int gensToKeep) { final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) // only randomize between nog age retention and a long one, so failures will have a chance of reproducing @@ -196,6 +206,7 @@ private TranslogConfig getTranslogConfig(final Path path) { .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), randomIntBetween(-1, 2048) + "b") .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.getKey(), gensToKeep) .build(); return getTranslogConfig(path, settings); } @@ -372,6 +383,111 @@ public void testSimpleOperations() throws IOException { } + private TranslogConfig getConfig(int gensToKeep) { + Path tempDir = createTempDir(); + final TranslogConfig temp = getTranslogConfig(tempDir, gensToKeep); + final TranslogConfig config = new TranslogConfig( + temp.getShardId(), + temp.getTranslogPath(), + temp.getIndexSettings(), + temp.getBigArrays(), + new ByteSizeValue(1, ByteSizeUnit.KB), + "" + ); + return config; + } + + private ChannelFactory getChannelFactory() { + writeCalls = new AtomicInteger(); + final ChannelFactory channelFactory = (file, openOption) -> { + FileChannel delegate = FileChannel.open(file, openOption); + boolean success = false; + try { + // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation + final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); + + final FileChannel channel; + if (isCkpFile) { + channel = delegate; + } else { + channel = new FilterFileChannel(delegate) { + + @Override + public int write(ByteBuffer src) throws IOException { + writeCalls.incrementAndGet(); + return super.write(src); + } + }; + } + success = true; + return channel; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(delegate); + } + } + }; + return channelFactory; + } + + public void testExtraGenToKeep() throws Exception { + TranslogConfig config = getConfig(1); + ChannelFactory channelFactory = getChannelFactory(); + final Set persistedSeqNos = new HashSet<>(); + String translogUUID = Translog.createEmptyTranslog( + config.getTranslogPath(), + SequenceNumbers.NO_OPS_PERFORMED, + shardId, + channelFactory, + primaryTerm.get() + ); + TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(config.getIndexSettings()); + ArrayList ops = new ArrayList<>(); + try ( + RemoteFsTranslog translog = new RemoteFsTranslog( + config, + translogUUID, + deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, + primaryTerm::get, + persistedSeqNos::add, + repository, + threadPool, + () -> Boolean.TRUE, + new RemoteTranslogTransferTracker(shardId, 10) + ) { + @Override + ChannelFactory getChannelFactory() { + return channelFactory; + } + } + ) { + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 1 })); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("3", 2, primaryTerm.get(), new byte[] { 1 })); + + // expose the 
new checkpoint (simulating a commit), before we trim the translog + translog.setMinSeqNoToKeep(2); + + // Trims from local + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("4", 3, primaryTerm.get(), new byte[] { 1 })); + + // Trims from remote now + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + assertEquals( + 6, + blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() + ); + + } + } + public void testReadLocation() throws IOException { ArrayList ops = new ArrayList<>(); ArrayList locs = new ArrayList<>(); @@ -619,14 +735,22 @@ public void testSimpleOperationsUpload() throws Exception { // this should now trim as tlog-2 files from remote, but not tlog-3 and tlog-4 addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 2, primaryTerm.get(), new byte[] { 1 })); assertEquals(2, translog.stats().estimatedNumberOfOperations()); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); translog.setMinSeqNoToKeep(2); - - assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + // this should now trim as tlog-2 files from remote, but not tlog-3 and tlog-4 translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); assertEquals(1, translog.stats().estimatedNumberOfOperations()); - assertBusy(() -> assertEquals(4, translog.allUploaded().size())); + assertBusy(() -> { + assertEquals(4, translog.allUploaded().size()); + assertEquals( + 4, + blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() + ); + }); + } public void testMetadataFileDeletion() throws Exception { @@ -1273,49 +1397,10 @@ public void testTranslogWriter() throws IOException { } public void testTranslogWriterCanFlushInAddOrReadCall() throws IOException { - Path tempDir = createTempDir(); - final TranslogConfig temp = getTranslogConfig(tempDir); - final TranslogConfig config = new TranslogConfig( - temp.getShardId(), - temp.getTranslogPath(), - temp.getIndexSettings(), - temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB), - "" - ); - + final TranslogConfig config = getConfig(1); final Set persistedSeqNos = new HashSet<>(); - final AtomicInteger writeCalls = new AtomicInteger(); - - final ChannelFactory channelFactory = (file, openOption) -> { - FileChannel delegate = FileChannel.open(file, openOption); - boolean success = false; - try { - // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation - final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); - - final FileChannel channel; - if (isCkpFile) { - channel = delegate; - } else { - channel = new FilterFileChannel(delegate) { - - @Override - public int write(ByteBuffer src) throws IOException { - writeCalls.incrementAndGet(); - return super.write(src); - } - }; - } - success = true; - return channel; - } finally { - if (success == false) { - IOUtils.closeWhileHandlingException(delegate); - } - } - }; - + writeCalls = new AtomicInteger(); + final ChannelFactory channelFactory = getChannelFactory(); String translogUUID = Translog.createEmptyTranslog( config.getTranslogPath(), 
SequenceNumbers.NO_OPS_PERFORMED, From 8f13dee77a7e78833cf90b20607cb4d714032bd8 Mon Sep 17 00:00:00 2001 From: Rahul Karajgikar <50844303+rahulkarajgikar@users.noreply.github.com> Date: Mon, 23 Oct 2023 14:50:20 +0530 Subject: [PATCH 074/178] Make index and global metadata upload timeout dynamic cluster settings (#10814) * Make index and global metadata upload wait time dynamic Signed-off-by: Rahul Karajgikar --- CHANGELOG.md | 1 + .../common/settings/ClusterSettings.java | 2 + .../remote/RemoteClusterStateService.java | 47 +++++++++++++++++-- .../RemoteClusterStateServiceTests.java | 36 +++++++++++++- 4 files changed, 80 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 374dd4ab57ee6..9874e7c431b7c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -94,6 +94,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote Store] Add repository stats for remote store([#10567](https://github.com/opensearch-project/OpenSearch/pull/10567)) - Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) - Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) +- [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814)) ### Dependencies - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 7ac7da819b215..a0fca4f0a2ff0 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -682,6 +682,8 @@ public void apply(Settings value, Settings current, Settings previous) { // Remote cluster state settings RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING, + RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, + RemoteClusterStateService.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index b505b3e2d4c0c..ae4a3fab9852d 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -83,9 +83,23 @@ public class RemoteClusterStateService implements Closeable { private static final Logger logger = LogManager.getLogger(RemoteClusterStateService.class); - // TODO make this two variable as dynamic setting [issue: #10688] - public static final int INDEX_METADATA_UPLOAD_WAIT_MILLIS = 20000; - public static final int GLOBAL_METADATA_UPLOAD_WAIT_MILLIS = 20000; + public static final TimeValue INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + + public static final TimeValue 
GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + + public static final Setting INDEX_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.index_metadata.upload_timeout", + INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.global_metadata.upload_timeout", + GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); public static final ChecksumBlobStoreFormat INDEX_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( "index-metadata", @@ -141,6 +155,9 @@ public class RemoteClusterStateService implements Closeable { private BlobStoreTransferService blobStoreTransferService; private volatile TimeValue slowWriteLoggingThreshold; + private volatile TimeValue indexMetadataUploadTimeout; + private volatile TimeValue globalMetadataUploadTimeout; + private final AtomicBoolean deleteStaleMetadataRunning = new AtomicBoolean(false); public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 1; @@ -171,7 +188,11 @@ public RemoteClusterStateService( this.relativeTimeNanosSupplier = relativeTimeNanosSupplier; this.threadpool = threadPool; this.slowWriteLoggingThreshold = clusterSettings.get(SLOW_WRITE_LOGGING_THRESHOLD); + this.indexMetadataUploadTimeout = clusterSettings.get(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING); + this.globalMetadataUploadTimeout = clusterSettings.get(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING); clusterSettings.addSettingsUpdateConsumer(SLOW_WRITE_LOGGING_THRESHOLD, this::setSlowWriteLoggingThreshold); + clusterSettings.addSettingsUpdateConsumer(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, this::setIndexMetadataUploadTimeout); + clusterSettings.addSettingsUpdateConsumer(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, this::setGlobalMetadataUploadTimeout); } private BlobStoreTransferService getBlobStoreTransferService() { @@ -367,7 +388,7 @@ private String writeGlobalMetadata(ClusterState clusterState) throws IOException ); try { - if (latch.await(GLOBAL_METADATA_UPLOAD_WAIT_MILLIS, TimeUnit.MILLISECONDS) == false) { + if (latch.await(getGlobalMetadataUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { // TODO: We should add metrics where transfer is timing out. 
[Issue: #10687] GlobalMetadataTransferException ex = new GlobalMetadataTransferException( String.format(Locale.ROOT, "Timed out waiting for transfer of global metadata to complete") @@ -422,7 +443,7 @@ private List writeIndexMetadataParallel(ClusterState clus } try { - if (latch.await(INDEX_METADATA_UPLOAD_WAIT_MILLIS, TimeUnit.MILLISECONDS) == false) { + if (latch.await(getIndexMetadataUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { IndexMetadataTransferException ex = new IndexMetadataTransferException( String.format( Locale.ROOT, @@ -621,6 +642,22 @@ private void setSlowWriteLoggingThreshold(TimeValue slowWriteLoggingThreshold) { this.slowWriteLoggingThreshold = slowWriteLoggingThreshold; } + private void setIndexMetadataUploadTimeout(TimeValue newIndexMetadataUploadTimeout) { + this.indexMetadataUploadTimeout = newIndexMetadataUploadTimeout; + } + + private void setGlobalMetadataUploadTimeout(TimeValue newGlobalMetadataUploadTimeout) { + this.globalMetadataUploadTimeout = newGlobalMetadataUploadTimeout; + } + + public TimeValue getIndexMetadataUploadTimeout() { + return this.indexMetadataUploadTimeout; + } + + public TimeValue getGlobalMetadataUploadTimeout() { + return this.globalMetadataUploadTimeout; + } + static String getManifestFileName(long term, long version, boolean committed) { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest______C/P____ return String.join( diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 827735f6bcc0b..4be5fc03c2a6d 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -102,6 +102,7 @@ public class RemoteClusterStateServiceTests extends OpenSearchTestCase { private RemoteClusterStateService remoteClusterStateService; + private ClusterSettings clusterSettings; private Supplier repositoriesServiceSupplier; private RepositoriesService repositoriesService; private BlobStoreRepository blobStoreRepository; @@ -132,6 +133,7 @@ public void setup() { .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); NamedXContentRegistry xContentRegistry = new NamedXContentRegistry( Stream.of( NetworkModule.getNamedXContents().stream(), @@ -149,7 +151,7 @@ public void setup() { "test-node-id", repositoriesServiceSupplier, settings, - new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + clusterSettings, () -> 0L, threadPool ); @@ -1053,6 +1055,38 @@ public void testSingleConcurrentExecutionOfStaleManifestCleanup() throws Excepti assertBusy(() -> assertEquals(1, callCount.get())); } + public void testIndexMetadataUploadWaitTimeSetting() { + // verify default value + assertEquals( + RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT, + remoteClusterStateService.getIndexMetadataUploadTimeout() + ); + + // verify update index metadata upload timeout + int indexMetadataUploadTimeout = randomIntBetween(1, 10); + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.index_metadata.upload_timeout", indexMetadataUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + assertEquals(indexMetadataUploadTimeout, 
remoteClusterStateService.getIndexMetadataUploadTimeout().seconds()); + } + + public void testGlobalMetadataUploadWaitTimeSetting() { + // verify default value + assertEquals( + RemoteClusterStateService.GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT, + remoteClusterStateService.getGlobalMetadataUploadTimeout() + ); + + // verify update global metadata upload timeout + int globalMetadataUploadTimeout = randomIntBetween(1, 10); + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.global_metadata.upload_timeout", globalMetadataUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + assertEquals(globalMetadataUploadTimeout, remoteClusterStateService.getGlobalMetadataUploadTimeout().seconds()); + } + private void mockObjectsForGettingPreviousClusterUUID(Map clusterUUIDsPointers) throws IOException { mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, false); } From d84a00bbcadd41a4631eed3a96691645322ee5c0 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Mon, 23 Oct 2023 16:59:55 +0530 Subject: [PATCH 075/178] Revert "Validate checksum of each segment file post download from remote store (#10119)" (#10847) This reverts commit 6c022612769e6bc7ed18b9ec0888d6cd0dd415cc. Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale --- .../remotestore/RemoteStoreRestoreIT.java | 33 +------------ .../store/RemoteSegmentStoreDirectory.java | 4 -- .../org/opensearch/index/store/Store.java | 47 +------------------ 3 files changed, 3 insertions(+), 81 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java index 212f797180077..7626e3dba6424 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java @@ -10,11 +10,8 @@ import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; -import org.opensearch.action.admin.indices.get.GetIndexRequest; -import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.health.ClusterHealthStatus; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; @@ -22,12 +19,10 @@ import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; -import org.opensearch.test.CorruptionUtils; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; import java.util.HashMap; import java.util.Locale; @@ -35,14 +30,13 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static 
org.hamcrest.Matchers.greaterThan; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0) public class RemoteStoreRestoreIT extends BaseRemoteStoreRestoreIT { /** @@ -467,30 +461,5 @@ public void testRateLimitedRemoteDownloads() throws Exception { } } - public void testRestoreCorruptSegmentShouldFail() throws IOException, ExecutionException, InterruptedException { - prepareCluster(1, 3, INDEX_NAME, 0, 1); - indexData(randomIntBetween(3, 4), true, INDEX_NAME); - - GetIndexResponse getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest()).get(); - String indexUUID = getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID); - - logger.info("--> Corrupting segment files in remote segment store"); - Path path = segmentRepoPath.resolve(indexUUID).resolve("0").resolve("segments").resolve("data"); - try (Stream dataPath = Files.list(path)) { - CorruptionUtils.corruptFile(random(), dataPath.toArray(Path[]::new)); - } - - logger.info("--> Stop primary"); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); - - logger.info("--> Close and restore the index"); - client().admin() - .cluster() - .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME).waitForCompletion(true), PlainActionFuture.newFuture()); - - logger.info("--> Check for index status, should be red due to corruption"); - ensureRed(INDEX_NAME); - } - // TODO: Restore flow - index aliases } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 6b43fed3d8930..be1f2341236ab 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -290,10 +290,6 @@ public void setWrittenByMajor(int writtenByMajor) { ); } } - - public int getWrittenByMajor() { - return writtenByMajor; - } } /** diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index d0cd2635ba672..b822742de6e97 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -105,7 +105,6 @@ import java.io.UncheckedIOException; import java.nio.file.NoSuchFileException; import java.nio.file.Path; -import java.text.ParseException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -121,7 +120,6 @@ import java.util.zip.CRC32; import java.util.zip.Checksum; -import static java.lang.Character.MAX_RADIX; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; @@ -977,11 +975,7 @@ public void copyFrom(Directory from, String src, String dest, IOContext context) boolean success = false; long startTime = System.currentTimeMillis(); try { - if (from instanceof RemoteSegmentStoreDirectory) { - copyFileAndValidateChecksum(from, src, dest, context, fileSize); - } else { - super.copyFrom(from, src, dest, context); - } + super.copyFrom(from, src, dest, context); success = true; afterDownload(fileSize, startTime); } finally { @@ -991,43 +985,6 @@ public void copyFrom(Directory from, String src, 
String dest, IOContext context) } } - private void copyFileAndValidateChecksum(Directory from, String src, String dest, IOContext context, long fileSize) - throws IOException { - RemoteSegmentStoreDirectory.UploadedSegmentMetadata metadata = ((RemoteSegmentStoreDirectory) from) - .getSegmentsUploadedToRemoteStore() - .get(dest); - boolean success = false; - try (IndexInput is = from.openInput(src, context); IndexOutput os = createOutput(dest, context)) { - // Here, we don't need the exact version as LuceneVerifyingIndexOutput does not verify version - // It is just used to emit logs when the entire metadata object is provided as parameter. Also, - // we can't provide null version as StoreFileMetadata has non-null check on writtenBy field. - Version luceneMajorVersion = Version.parse(metadata.getWrittenByMajor() + ".0.0"); - long checksum = Long.parseLong(metadata.getChecksum()); - StoreFileMetadata storeFileMetadata = new StoreFileMetadata( - dest, - fileSize, - Long.toString(checksum, MAX_RADIX), - luceneMajorVersion - ); - VerifyingIndexOutput verifyingIndexOutput = new LuceneVerifyingIndexOutput(storeFileMetadata, os); - verifyingIndexOutput.copyBytes(is, is.length()); - verifyingIndexOutput.verify(); - success = true; - } catch (ParseException e) { - throw new IOException("Exception while reading version info for segment file from remote store: " + dest, e); - } finally { - if (success == false) { - // If the exception is thrown after file is created, we clean up the file. - // We ignore the exception as the deletion is best-effort basis and can fail if file does not exist. - try { - deleteFile("Quietly deleting", dest); - } catch (Exception e) { - // Ignore - } - } - } - } - /** * Updates the amount of bytes attempted for download */ @@ -1519,7 +1476,7 @@ public static boolean isAutogenerated(String name) { * Produces a string representation of the given digest value. */ public static String digestToString(long digest) { - return Long.toString(digest, MAX_RADIX); + return Long.toString(digest, Character.MAX_RADIX); } /** From 08ffd3586d381fe3526965e8a4570226a9704407 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Oct 2023 11:21:44 -0400 Subject: [PATCH 076/178] Bump org.apache.logging.log4j:log4j-core from 2.20.0 to 2.21.0 (#10858) * Bump org.apache.logging.log4j:log4j-core Bumps org.apache.logging.log4j:log4j-core from 2.20.0 to 2.21.0. --- updated-dependencies: - dependency-name: org.apache.logging.log4j:log4j-core dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] * Bumps org.apache.logging.log4j:log4j-core from 2.20.0 to 2.21.0 Signed-off-by: Andriy Redko --------- Signed-off-by: dependabot[bot] Signed-off-by: Andriy Redko Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Andriy Redko --- CHANGELOG.md | 3 ++- .../src/testKit/thirdPartyAudit/sample_jars/build.gradle | 3 ++- buildSrc/version.properties | 2 +- libs/core/licenses/log4j-api-2.20.0.jar.sha1 | 1 - libs/core/licenses/log4j-api-2.21.0.jar.sha1 | 1 + plugins/crypto-kms/licenses/log4j-1.2-api-2.20.0.jar.sha1 | 1 - plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 | 1 + .../licenses/log4j-1.2-api-2.20.0.jar.sha1 | 1 - .../licenses/log4j-1.2-api-2.21.0.jar.sha1 | 1 + plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 | 1 - plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 | 1 + plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 | 1 - plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 | 1 + .../repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 | 1 - .../repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 | 1 + .../licenses/log4j-slf4j-impl-2.20.0.jar.sha1 | 1 - .../licenses/log4j-slf4j-impl-2.21.0.jar.sha1 | 1 + plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 | 1 - plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 | 1 + qa/os/build.gradle | 5 +++++ server/build.gradle | 4 +++- server/licenses/log4j-api-2.20.0.jar.sha1 | 1 - server/licenses/log4j-api-2.21.0.jar.sha1 | 1 + server/licenses/log4j-core-2.20.0.jar.sha1 | 1 - server/licenses/log4j-core-2.21.0.jar.sha1 | 1 + server/licenses/log4j-jul-2.20.0.jar.sha1 | 1 - server/licenses/log4j-jul-2.21.0.jar.sha1 | 1 + 27 files changed, 24 insertions(+), 15 deletions(-) delete mode 100644 libs/core/licenses/log4j-api-2.20.0.jar.sha1 create mode 100644 libs/core/licenses/log4j-api-2.21.0.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/log4j-1.2-api-2.20.0.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 create mode 100644 plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 create mode 100644 plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 delete mode 100644 server/licenses/log4j-api-2.20.0.jar.sha1 create mode 100644 server/licenses/log4j-api-2.21.0.jar.sha1 delete mode 100644 server/licenses/log4j-core-2.20.0.jar.sha1 create mode 100644 server/licenses/log4j-core-2.21.0.jar.sha1 delete mode 100644 server/licenses/log4j-jul-2.20.0.jar.sha1 create mode 100644 server/licenses/log4j-jul-2.21.0.jar.sha1 diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 9874e7c431b7c..8c4563a216974 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -107,6 +107,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) - Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635)) - Bump `com.squareup.okio:okio` from 3.5.0 to 3.6.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.21.0 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) @@ -130,4 +131,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x \ No newline at end of file diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index cb8050d1718c4..74c88e0961c9c 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -15,8 +15,9 @@ plugins { repositories { mavenCentral() } + dependencies { - implementation "org.apache.logging.log4j:log4j-core:2.20.0" + implementation "org.apache.logging.log4j:log4j-core:2.21.0" } ["0.0.1", "0.0.2"].forEach { v -> diff --git a/buildSrc/version.properties b/buildSrc/version.properties index a5171aa582a86..96d398c35851d 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -14,7 +14,7 @@ jackson_databind = 2.15.2 snakeyaml = 2.1 icu4j = 70.1 supercsv = 2.4.0 -log4j = 2.20.0 +log4j = 2.21.0 slf4j = 1.7.36 asm = 9.6 jettison = 1.5.4 diff --git a/libs/core/licenses/log4j-api-2.20.0.jar.sha1 b/libs/core/licenses/log4j-api-2.20.0.jar.sha1 deleted file mode 100644 index 37154d9861ac0..0000000000000 --- a/libs/core/licenses/log4j-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fe6082e660daf07c689a89c94dc0f49c26b44bb \ No newline at end of file diff --git a/libs/core/licenses/log4j-api-2.21.0.jar.sha1 b/libs/core/licenses/log4j-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..51446052594aa --- /dev/null +++ b/libs/core/licenses/log4j-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +760192f2b69eacf4a4afc78e5a1d7a8de054fcbd \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/crypto-kms/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/crypto-kms/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git 
a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 deleted file mode 100644 index 800a4aa87ba0e..0000000000000 --- a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ab4f082fd162f60afcaf2b8744a3d959feab3e8 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..0e22f98daa61c --- /dev/null +++ 
b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 @@ -0,0 +1 @@ +911fdb5b1a1df36719c579ecc6f2957b88bce1ab \ No newline at end of file diff --git a/plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/qa/os/build.gradle b/qa/os/build.gradle index 66c6525439dac..082ed5277575a 100644 --- a/qa/os/build.gradle +++ b/qa/os/build.gradle @@ -70,6 +70,11 @@ tasks.dependenciesInfo.enabled = false tasks.thirdPartyAudit.ignoreMissingClasses() +tasks.thirdPartyAudit.ignoreViolations( + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil', + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1' +) + tasks.register('destructivePackagingTest') { dependsOn 'destructiveDistroTest' } diff --git a/server/build.gradle b/server/build.gradle index f6db3d53a0dcc..c56f9d5aa288f 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -364,7 +364,9 @@ tasks.named("thirdPartyAudit").configure { 'com.google.protobuf.UnsafeUtil$Android32MemoryAccessor', 'com.google.protobuf.UnsafeUtil$Android64MemoryAccessor', 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', - 'com.google.protobuf.UnsafeUtil$MemoryAccessor' + 'com.google.protobuf.UnsafeUtil$MemoryAccessor', + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil', + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1' ) } diff --git a/server/licenses/log4j-api-2.20.0.jar.sha1 b/server/licenses/log4j-api-2.20.0.jar.sha1 deleted file mode 100644 index 37154d9861ac0..0000000000000 --- a/server/licenses/log4j-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fe6082e660daf07c689a89c94dc0f49c26b44bb \ No newline at end of file diff --git a/server/licenses/log4j-api-2.21.0.jar.sha1 b/server/licenses/log4j-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..51446052594aa --- /dev/null +++ b/server/licenses/log4j-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +760192f2b69eacf4a4afc78e5a1d7a8de054fcbd \ No newline at end of file diff --git a/server/licenses/log4j-core-2.20.0.jar.sha1 b/server/licenses/log4j-core-2.20.0.jar.sha1 deleted file mode 100644 index 49c972626563b..0000000000000 --- a/server/licenses/log4j-core-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eb2a9a47b1396e00b5eee1264296729a70565cc0 \ No newline at end of file diff --git a/server/licenses/log4j-core-2.21.0.jar.sha1 b/server/licenses/log4j-core-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..c88e6f7a25ca9 --- /dev/null +++ b/server/licenses/log4j-core-2.21.0.jar.sha1 @@ -0,0 +1 @@ +122e1a9e0603cc9eae07b0846a6ff01f2454bc49 \ No newline at end of file diff --git a/server/licenses/log4j-jul-2.20.0.jar.sha1 b/server/licenses/log4j-jul-2.20.0.jar.sha1 deleted file mode 100644 index a456651e4569e..0000000000000 --- a/server/licenses/log4j-jul-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8170e6118eac1ab332046c179718a0f107f688e1 \ No newline at end of file diff --git a/server/licenses/log4j-jul-2.21.0.jar.sha1 
b/server/licenses/log4j-jul-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..480010840abca --- /dev/null +++ b/server/licenses/log4j-jul-2.21.0.jar.sha1 @@ -0,0 +1 @@ +f0da61113f4a47654677e6a98b1e13ca7de2483d \ No newline at end of file From da851244a273d6fbfc42eb115f2dcfca1c3cc728 Mon Sep 17 00:00:00 2001 From: David Z <38449481+dzane17@users.noreply.github.com> Date: Mon, 23 Oct 2023 11:01:47 -0700 Subject: [PATCH 077/178] Update per request latency check to V_2_12_0 (#10865) Signed-off-by: David Zane --- .../main/java/org/opensearch/action/search/SearchRequest.java | 4 ++-- .../java/org/opensearch/action/search/SearchResponse.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index 9e50213eab5f9..fb026dae630b7 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -256,7 +256,7 @@ public SearchRequest(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_2_7_0)) { pipeline = in.readOptionalString(); } - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { phaseTook = in.readOptionalBoolean(); } } @@ -290,7 +290,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_7_0)) { out.writeOptionalString(pipeline); } - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalBoolean(phaseTook); } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponse.java b/server/src/main/java/org/opensearch/action/search/SearchResponse.java index 91f0dc0737637..96d07982d03db 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponse.java @@ -116,7 +116,7 @@ public SearchResponse(StreamInput in) throws IOException { clusters = new Clusters(in); scrollId = in.readOptionalString(); tookInMillis = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { phaseTook = in.readOptionalWriteable(PhaseTook::new); } else { phaseTook = null; @@ -557,7 +557,7 @@ public void writeTo(StreamOutput out) throws IOException { clusters.writeTo(out); out.writeOptionalString(scrollId); out.writeVLong(tookInMillis); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(phaseTook); } out.writeVInt(skippedShards); From 8b2173910f754a48773b3283e1a511cbc1a9db78 Mon Sep 17 00:00:00 2001 From: Poojita Raj Date: Mon, 23 Oct 2023 16:08:04 -0700 Subject: [PATCH 078/178] =?UTF-8?q?Add=20cluster=20setting=20cluster.restr?= =?UTF-8?q?ict.index.replication=5Ftype=20t=E2=80=A6=20(#10866)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add cluster setting CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING to restrict setting of index setting replication type Signed-off-by: Poojita Raj * Add Changelog entry Signed-off-by: Poojita Raj * refactoring Signed-off-by: Poojita Raj --------- Signed-off-by: Poojita Raj --- CHANGELOG.md | 3 ++- .../SegmentReplicationClusterSettingIT.java | 27 +++++++++++++++++++ .../metadata/MetadataCreateIndexService.java | 19 +++++++++++++ 
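The SearchRequest/SearchResponse hunks above retarget the phaseTook wire gate from V_3_0_0 to V_2_12_0, the release that actually ships the field. A self-contained sketch of that read/write pattern follows; ExampleRequest and its flag field are stand-ins, while the Version/StreamInput/StreamOutput types are the ones the diff itself uses (package locations assumed from the 2.x line):

    import org.opensearch.Version;
    import org.opensearch.core.common.io.stream.StreamInput;
    import org.opensearch.core.common.io.stream.StreamOutput;
    import org.opensearch.core.common.io.stream.Writeable;

    import java.io.IOException;

    // Sketch of OpenSearch's version-gated wire pattern: an optional field is
    // only read/written when the stream version says both nodes know about it.
    public class ExampleRequest implements Writeable {
        private final Boolean flag; // nullable, like phaseTook above

        public ExampleRequest(StreamInput in) throws IOException {
            // Older senders never wrote the field, so only read it when the
            // stream version guarantees it is present; otherwise leave it null.
            if (in.getVersion().onOrAfter(Version.V_2_12_0)) {
                flag = in.readOptionalBoolean();
            } else {
                flag = null;
            }
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            // Mirror image: never write the field to a node too old to read it.
            if (out.getVersion().onOrAfter(Version.V_2_12_0)) {
                out.writeOptionalBoolean(flag);
            }
        }
    }

The gate must move in lockstep on both sides; a one-sided change would desync the stream and corrupt every field read after it.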
.../common/settings/ClusterSettings.java | 3 ++- .../opensearch/indices/IndicesService.java | 11 ++++++++ .../MetadataCreateIndexServiceTests.java | 16 +++++++++-- 6 files changed, 75 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8c4563a216974..0b0d9720ad208 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -95,6 +95,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) - Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) - [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814)) +- Added cluster setting cluster.restrict.index.replication_type to restrict setting of index setting replication type ([#10866](https://github.com/opensearch-project/OpenSearch/pull/10866)) ### Dependencies - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) @@ -131,4 +132,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x \ No newline at end of file +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java index a82fd8d845709..186a5ce39f131 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java @@ -19,6 +19,7 @@ import org.opensearch.test.OpenSearchIntegTestCase; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.indices.IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -123,4 +124,30 @@ public void testIndexReplicationSettingOverridesDocRepClusterSetting() throws Ex assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabled(), false); } + public void testIndexReplicationTypeWhenRestrictSettingTrue() { + testRestrictIndexReplicationTypeSetting(true, randomFrom(ReplicationType.values())); + } + + public void testIndexReplicationTypeWhenRestrictSettingFalse() { + testRestrictIndexReplicationTypeSetting(false, randomFrom(ReplicationType.values())); + } + + private void testRestrictIndexReplicationTypeSetting(boolean setRestrict, ReplicationType replicationType) { + String expectedExceptionMsg = + "Validation Failed: 1: index setting [index.replication.type] is not allowed to be set as [cluster.restrict.index.replication_type=true];"; + String clusterManagerName = 
internalCluster().startNode( + Settings.builder().put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), setRestrict).build() + ); + internalCluster().startDataOnlyNodes(1); + + // Test create index fails + Settings indexSettings = Settings.builder().put(indexSettings()).put(SETTING_REPLICATION_TYPE, replicationType).build(); + if (setRestrict) { + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, indexSettings)); + assertEquals(expectedExceptionMsg, exception.getMessage()); + } else { + createIndex(INDEX_NAME, indexSettings); + } + } + } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 8d76a39712ee3..78a22fe11f072 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1252,6 +1252,7 @@ List getIndexSettingsValidationErrors( if (forbidPrivateIndexSettings) { validationErrors.addAll(validatePrivateSettingsNotExplicitlySet(settings, indexScopedSettings)); } + validateIndexReplicationTypeSettings(settings, clusterService.getClusterSettings()).ifPresent(validationErrors::add); if (indexName.isEmpty() || indexName.get().charAt(0) != '.') { // Apply aware replica balance validation only to non system indices int replicaCount = settings.getAsInt( @@ -1306,6 +1307,24 @@ private static List validateIndexCustomPath(Settings settings, @Nullable return validationErrors; } + /** + * Validates {@code index.replication.type} is not set if {@code cluster.restrict.index.replication_type} is set to true. + * + * @param requestSettings settings passed in during index create request + * @param clusterSettings cluster setting + */ + private static Optional validateIndexReplicationTypeSettings(Settings requestSettings, ClusterSettings clusterSettings) { + if (requestSettings.hasValue(SETTING_REPLICATION_TYPE) + && clusterSettings.get(IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING)) { + return Optional.of( + "index setting [index.replication.type] is not allowed to be set as [" + + IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey() + + "=true]" + ); + } + return Optional.empty(); + } + /** * Validates the settings and mappings for shrinking an index. 
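The new validateIndexReplicationTypeSettings above returns an Optional error message rather than throwing, and the caller folds it into validationErrors via ifPresent so every failure surfaces in one exception. A reduced, self-contained sketch of that accumulate-then-fail style, with hypothetical setting keys in place of the real ones:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.Optional;

    // Sketch of the validation style used above: each check returns
    // Optional<String> and the caller collects messages before failing.
    public final class ValidationSketch {
        // One rule: reject "index.example" when "cluster.restrict.example" is true.
        static Optional<String> validateRestricted(Map<String, String> request, Map<String, String> cluster) {
            if (request.containsKey("index.example") && Boolean.parseBoolean(cluster.get("cluster.restrict.example"))) {
                return Optional.of("index setting [index.example] is not allowed to be set as [cluster.restrict.example=true]");
            }
            return Optional.empty();
        }

        public static void main(String[] args) {
            List<String> validationErrors = new ArrayList<>();
            Map<String, String> request = Map.of("index.example", "SEGMENT");
            Map<String, String> cluster = Map.of("cluster.restrict.example", "true");
            // ifPresent folds a failed check into the running error list,
            // as MetadataCreateIndexService does above.
            validateRestricted(request, cluster).ifPresent(validationErrors::add);
            if (!validationErrors.isEmpty()) {
                throw new IllegalArgumentException("Validation Failed: " + validationErrors);
            }
        }
    }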
* diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index a0fca4f0a2ff0..c2c6effc3336f 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -690,7 +690,8 @@ public void apply(Settings value, Settings current, Settings previous) { AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, CPUBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT, - CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT + CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, + IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 50c551c2be29b..36abc77893d81 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -299,6 +299,17 @@ public class IndicesService extends AbstractLifecycleComponent Property.Final ); + /** + * This setting is used to restrict creation of index where the 'index.replication.type' index setting is set. + * If disabled, the replication type can be specified. + */ + public static final Setting CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING = Setting.boolSetting( + "cluster.restrict.index.replication_type", + false, + Property.NodeScope, + Property.Final + ); + /** * The node's settings. */ diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index e40826915c848..cace66d8c6d9e 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -139,6 +139,7 @@ import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.ShardLimitValidatorTests.createTestShardLimitService; import static org.opensearch.node.Node.NODE_ATTRIBUTES; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; @@ -1177,6 +1178,8 @@ public void testvalidateIndexSettings() { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a, b") .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) + .put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), true) + .put(SETTING_REPLICATION_TYPE, randomFrom(ReplicationType.values())) .build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); when(clusterService.getSettings()).thenReturn(settings); @@ 
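The IndicesService hunk above declares the new key with Setting.boolSetting as a final, node-scoped boolean setting, and the ClusterSettings hunk registers it in the built-in registry so nodes accept it. A minimal sketch of that declaration and lookup, using the same org.opensearch.common.settings API (the field placement here is illustrative):

    import org.opensearch.common.settings.Setting;
    import org.opensearch.common.settings.Setting.Property;
    import org.opensearch.common.settings.Settings;

    // Sketch of declaring a node-scoped, final boolean cluster setting,
    // as IndicesService does above.
    public final class SettingSketch {
        // Final: cannot be updated on a live node; NodeScope: read from node settings.
        public static final Setting<Boolean> RESTRICT_REPLICATION_TYPE = Setting.boolSetting(
            "cluster.restrict.index.replication_type",
            false,                 // default: replication type may still be set per index
            Property.NodeScope,
            Property.Final
        );

        public static void main(String[] args) {
            Settings settings = Settings.builder()
                .put(RESTRICT_REPLICATION_TYPE.getKey(), true)
                .build();
            // get(...) applies the declared default when the key is absent.
            System.out.println(RESTRICT_REPLICATION_TYPE.get(settings)); // true
        }
    }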
-1200,8 +1203,12 @@ public void testvalidateIndexSettings() { ); List validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); - assertThat(validationErrors.size(), is(1)); - assertThat(validationErrors.get(0), is("expected total copies needs to be a multiple of total awareness attributes [3]")); + assertThat(validationErrors.size(), is(2)); + assertThat( + validationErrors.get(0), + is("index setting [index.replication.type] is not allowed to be set as [cluster.restrict.index.replication_type=true]") + ); + assertThat(validationErrors.get(1), is("expected total copies needs to be a multiple of total awareness attributes [3]")); settings = Settings.builder() .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone, rack") @@ -1209,8 +1216,13 @@ public void testvalidateIndexSettings() { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) .put(SETTING_NUMBER_OF_REPLICAS, 2) + .put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), false) + .put(SETTING_REPLICATION_TYPE, randomFrom(ReplicationType.values())) .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); assertThat(validationErrors.size(), is(0)); From 5a288ed3ecf06a048eba6028c445aa4e642d39c5 Mon Sep 17 00:00:00 2001 From: Dhwanil Patel Date: Tue, 24 Oct 2023 09:02:48 +0530 Subject: [PATCH 079/178] Changing version check to 2.12 for remote cluster state (#10844) Signed-off-by: Dhwanil Patel --- .../opensearch/gateway/remote/ClusterMetadataManifest.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java index 97b37d9532f85..4725f40076ce2 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java @@ -262,7 +262,7 @@ public ClusterMetadataManifest(StreamInput in) throws IOException { this.indices = Collections.unmodifiableList(in.readList(UploadedIndexMetadata::new)); this.previousClusterUUID = in.readString(); this.clusterUUIDCommitted = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { this.codecVersion = in.readInt(); this.globalMetadataFileName = in.readString(); } else { @@ -316,7 +316,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(indices); out.writeString(previousClusterUUID); out.writeBoolean(clusterUUIDCommitted); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeInt(codecVersion); out.writeString(globalMetadataFileName); } From a2b5f0e3a9515f101653afc88dd98e48015f6346 Mon Sep 17 00:00:00 2001 From: Shivansh Arora <31575408+shiv0408@users.noreply.github.com> Date: Tue, 24 Oct 2023 13:58:32 +0530 Subject: [PATCH 080/178] Created new urgent priority threadpool for remote cluster state uploads (#10685) * Created new Urgent WritePriority and a new threadpools and S3Client for 
this priority which is being used to upload cluster state Signed-off-by: Shivansh Arora --- .../s3/S3BlobStoreRepositoryTests.java | 2 +- .../s3/AmazonAsyncS3Reference.java | 1 + .../s3/AmazonAsyncS3WithCredentials.java | 10 ++- .../repositories/s3/S3AsyncService.java | 17 +++- .../repositories/s3/S3BlobContainer.java | 11 ++- .../repositories/s3/S3BlobStore.java | 5 +- .../repositories/s3/S3Repository.java | 6 ++ .../repositories/s3/S3RepositoryPlugin.java | 22 ++++- .../s3/async/AsyncPartsHandler.java | 15 +++- .../s3/async/AsyncTransferManager.java | 21 ++++- .../s3/RepositoryCredentialsTests.java | 2 +- .../repositories/s3/S3AsyncServiceTests.java | 8 +- .../s3/S3BlobContainerMockClientTests.java | 5 +- .../s3/S3BlobContainerRetriesTests.java | 2 + .../s3/S3BlobStoreContainerTests.java | 8 +- .../repositories/s3/S3RepositoryTests.java | 1 + .../s3/async/AsyncTransferManagerTests.java | 1 + .../blobstore/stream/write/WritePriority.java | 3 +- .../remote/RemoteClusterStateService.java | 4 +- .../blobstore/ChecksumBlobStoreFormat.java | 45 ++++++++-- .../RemoteClusterStateServiceTests.java | 2 +- .../snapshots/BlobStoreFormatTests.java | 87 +++++++++++++++---- 22 files changed, 224 insertions(+), 54 deletions(-) diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index 4df30bfd2169e..da2c6e8c1b0ee 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -249,7 +249,7 @@ protected S3Repository createRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, false) { + return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, null, false) { @Override public BlobStore blobStore() { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java index 0b5fcb6df280e..45170ea1ad209 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java @@ -29,6 +29,7 @@ public class AmazonAsyncS3Reference extends RefCountedReleasable { client.client().close(); client.priorityClient().close(); + client.urgentClient().close(); AwsCredentialsProvider credentials = client.credentials(); if (credentials instanceof Closeable) { try { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java index fa2db83729d25..f8a313b55d945 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java @@ -19,16 +19,19 @@ final class AmazonAsyncS3WithCredentials { private final S3AsyncClient client; private final S3AsyncClient priorityClient; + private final S3AsyncClient 
urgentClient; private final AwsCredentialsProvider credentials; private AmazonAsyncS3WithCredentials( final S3AsyncClient client, final S3AsyncClient priorityClient, + final S3AsyncClient urgentClient, @Nullable final AwsCredentialsProvider credentials ) { this.client = client; this.credentials = credentials; this.priorityClient = priorityClient; + this.urgentClient = urgentClient; } S3AsyncClient client() { @@ -39,6 +42,10 @@ S3AsyncClient priorityClient() { return priorityClient; } + S3AsyncClient urgentClient() { + return urgentClient; + } + AwsCredentialsProvider credentials() { return credentials; } @@ -46,8 +53,9 @@ AwsCredentialsProvider credentials() { static AmazonAsyncS3WithCredentials create( final S3AsyncClient client, final S3AsyncClient priorityClient, + final S3AsyncClient urgentClient, @Nullable final AwsCredentialsProvider credentials ) { - return new AmazonAsyncS3WithCredentials(client, priorityClient, credentials); + return new AmazonAsyncS3WithCredentials(client, priorityClient, urgentClient, credentials); } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java index 08215ebdd45e0..262304029a0d3 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java @@ -103,6 +103,7 @@ public synchronized void refreshAndClearCache(Map clie */ public AmazonAsyncS3Reference client( RepositoryMetadata repositoryMetadata, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { @@ -119,7 +120,7 @@ public AmazonAsyncS3Reference client( return existing; } final AmazonAsyncS3Reference clientReference = new AmazonAsyncS3Reference( - buildClient(clientSettings, priorityExecutorBuilder, normalExecutorBuilder) + buildClient(clientSettings, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder) ); clientReference.incRef(); clientsCache = MapBuilder.newMapBuilder(clientsCache).put(clientSettings, clientReference).immutableMap(); @@ -165,6 +166,7 @@ S3ClientSettings settings(RepositoryMetadata repositoryMetadata) { // proxy for testing synchronized AmazonAsyncS3WithCredentials buildClient( final S3ClientSettings clientSettings, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { @@ -195,6 +197,17 @@ synchronized AmazonAsyncS3WithCredentials buildClient( builder.forcePathStyle(true); } + builder.httpClient(buildHttpClient(clientSettings, urgentExecutorBuilder.getAsyncTransferEventLoopGroup())); + builder.asyncConfiguration( + ClientAsyncConfiguration.builder() + .advancedOption( + SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR, + urgentExecutorBuilder.getFutureCompletionExecutor() + ) + .build() + ); + final S3AsyncClient urgentClient = SocketAccess.doPrivileged(builder::build); + builder.httpClient(buildHttpClient(clientSettings, priorityExecutorBuilder.getAsyncTransferEventLoopGroup())); builder.asyncConfiguration( ClientAsyncConfiguration.builder() @@ -217,7 +230,7 @@ synchronized AmazonAsyncS3WithCredentials buildClient( ); final S3AsyncClient client = SocketAccess.doPrivileged(builder::build); - return AmazonAsyncS3WithCredentials.create(client, priorityClient, credentials); + return 
AmazonAsyncS3WithCredentials.create(client, priorityClient, urgentClient, credentials); } static ClientOverrideConfiguration buildOverrideConfiguration(final S3ClientSettings clientSettings) { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index 24aee99242957..c1180aab0e0c7 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -195,9 +195,14 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener comp StreamContext streamContext = SocketAccess.doPrivileged(() -> writeContext.getStreamProvider(partSize)); try (AmazonAsyncS3Reference amazonS3Reference = SocketAccess.doPrivileged(blobStore::asyncClientReference)) { - S3AsyncClient s3AsyncClient = writeContext.getWritePriority() == WritePriority.HIGH - ? amazonS3Reference.get().priorityClient() - : amazonS3Reference.get().client(); + S3AsyncClient s3AsyncClient; + if (writeContext.getWritePriority() == WritePriority.URGENT) { + s3AsyncClient = amazonS3Reference.get().urgentClient(); + } else if (writeContext.getWritePriority() == WritePriority.HIGH) { + s3AsyncClient = amazonS3Reference.get().priorityClient(); + } else { + s3AsyncClient = amazonS3Reference.get().client(); + } CompletableFuture completableFuture = blobStore.getAsyncTransferManager() .uploadObject(s3AsyncClient, uploadRequest, streamContext, blobStore.getStatsMetricPublisher()); completableFuture.whenComplete((response, throwable) -> { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java index f568d871dd31a..e8e043357e126 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java @@ -84,6 +84,7 @@ class S3BlobStore implements BlobStore { private final StatsMetricPublisher statsMetricPublisher = new StatsMetricPublisher(); private final AsyncTransferManager asyncTransferManager; + private final AsyncExecutorContainer urgentExecutorBuilder; private final AsyncExecutorContainer priorityExecutorBuilder; private final AsyncExecutorContainer normalExecutorBuilder; private final boolean multipartUploadEnabled; @@ -100,6 +101,7 @@ class S3BlobStore implements BlobStore { int bulkDeletesSize, RepositoryMetadata repositoryMetadata, AsyncTransferManager asyncTransferManager, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { @@ -116,6 +118,7 @@ class S3BlobStore implements BlobStore { this.asyncTransferManager = asyncTransferManager; this.normalExecutorBuilder = normalExecutorBuilder; this.priorityExecutorBuilder = priorityExecutorBuilder; + this.urgentExecutorBuilder = urgentExecutorBuilder; } @Override @@ -139,7 +142,7 @@ public AmazonS3Reference clientReference() { } public AmazonAsyncS3Reference asyncClientReference() { - return s3AsyncService.client(repositoryMetadata, priorityExecutorBuilder, normalExecutorBuilder); + return s3AsyncService.client(repositoryMetadata, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder); } int getMaxRetries() { diff --git 
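With the third client in place, S3BlobContainer.asyncBlobUpload above resolves the client from the write priority, so URGENT traffic (cluster state) never queues behind bulk snapshot uploads. A compact standalone sketch of that three-way dispatch; Clients and its String handles are stand-ins for the real S3AsyncClient references:

    // Sketch of the client dispatch S3BlobContainer gains above.
    public final class ClientDispatchSketch {
        enum WritePriority { NORMAL, HIGH, URGENT }

        record Clients(String normal, String priority, String urgent) {}

        // URGENT gets its own client (and thread pools) so cluster state
        // uploads are isolated from snapshot traffic.
        static String select(Clients clients, WritePriority priority) {
            if (priority == WritePriority.URGENT) {
                return clients.urgent();
            } else if (priority == WritePriority.HIGH) {
                return clients.priority();
            }
            return clients.normal();
        }

        public static void main(String[] args) {
            Clients clients = new Clients("normal-client", "priority-client", "urgent-client");
            System.out.println(select(clients, WritePriority.URGENT)); // urgent-client
        }
    }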
a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index aaf5b79891cdc..728a99b1220a6 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -234,6 +234,7 @@ class S3Repository extends MeteredBlobStoreRepository { private final AsyncTransferManager asyncUploadUtils; private final S3AsyncService s3AsyncService; private final boolean multipartUploadEnabled; + private final AsyncExecutorContainer urgentExecutorBuilder; private final AsyncExecutorContainer priorityExecutorBuilder; private final AsyncExecutorContainer normalExecutorBuilder; private final Path pluginConfigPath; @@ -248,6 +249,7 @@ class S3Repository extends MeteredBlobStoreRepository { final ClusterService clusterService, final RecoverySettings recoverySettings, final AsyncTransferManager asyncUploadUtils, + final AsyncExecutorContainer urgentExecutorBuilder, final AsyncExecutorContainer priorityExecutorBuilder, final AsyncExecutorContainer normalExecutorBuilder, final S3AsyncService s3AsyncService, @@ -260,6 +262,7 @@ class S3Repository extends MeteredBlobStoreRepository { clusterService, recoverySettings, asyncUploadUtils, + urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder, s3AsyncService, @@ -278,6 +281,7 @@ class S3Repository extends MeteredBlobStoreRepository { final ClusterService clusterService, final RecoverySettings recoverySettings, final AsyncTransferManager asyncUploadUtils, + final AsyncExecutorContainer urgentExecutorBuilder, final AsyncExecutorContainer priorityExecutorBuilder, final AsyncExecutorContainer normalExecutorBuilder, final S3AsyncService s3AsyncService, @@ -290,6 +294,7 @@ class S3Repository extends MeteredBlobStoreRepository { this.multipartUploadEnabled = multipartUploadEnabled; this.pluginConfigPath = pluginConfigPath; this.asyncUploadUtils = asyncUploadUtils; + this.urgentExecutorBuilder = urgentExecutorBuilder; this.priorityExecutorBuilder = priorityExecutorBuilder; this.normalExecutorBuilder = normalExecutorBuilder; @@ -352,6 +357,7 @@ protected S3BlobStore createBlobStore() { bulkDeletesSize, metadata, asyncUploadUtils, + urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder ); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java index c6450e49d08e2..9ed232464d080 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java @@ -75,6 +75,9 @@ * A plugin to add a repository type that writes to and from the AWS S3. 
*/ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { + + private static final String URGENT_FUTURE_COMPLETION = "urgent_future_completion"; + private static final String URGENT_STREAM_READER = "urgent_stream_reader"; private static final String PRIORITY_FUTURE_COMPLETION = "priority_future_completion"; private static final String PRIORITY_STREAM_READER = "priority_stream_reader"; private static final String FUTURE_COMPLETION = "future_completion"; @@ -85,6 +88,7 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo private final Path configPath; + private AsyncExecutorContainer urgentExecutorBuilder; private AsyncExecutorContainer priorityExecutorBuilder; private AsyncExecutorContainer normalExecutorBuilder; @@ -96,6 +100,10 @@ public S3RepositoryPlugin(final Settings settings, final Path configPath) { public List> getExecutorBuilders(Settings settings) { List> executorBuilders = new ArrayList<>(); int halfProcMaxAt5 = halfAllocatedProcessorsMaxFive(allocatedProcessors(settings)); + executorBuilders.add( + new FixedExecutorBuilder(settings, URGENT_FUTURE_COMPLETION, urgentPoolCount(settings), 10_000, URGENT_FUTURE_COMPLETION) + ); + executorBuilders.add(new ScalingExecutorBuilder(URGENT_STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); executorBuilders.add( new FixedExecutorBuilder(settings, PRIORITY_FUTURE_COMPLETION, priorityPoolCount(settings), 10_000, PRIORITY_FUTURE_COMPLETION) ); @@ -128,6 +136,10 @@ private static int allocatedProcessors(Settings settings) { return OpenSearchExecutors.allocatedProcessors(settings); } + private static int urgentPoolCount(Settings settings) { + return boundedBy((allocatedProcessors(settings) + 7) / 8, 1, 2); + } + private static int priorityPoolCount(Settings settings) { return boundedBy((allocatedProcessors(settings) + 1) / 2, 2, 4); } @@ -150,8 +162,14 @@ public Collection createComponents( final IndexNameExpressionResolver expressionResolver, final Supplier repositoriesServiceSupplier ) { + int urgentEventLoopThreads = urgentPoolCount(clusterService.getSettings()); int priorityEventLoopThreads = priorityPoolCount(clusterService.getSettings()); int normalEventLoopThreads = normalPoolCount(clusterService.getSettings()); + this.urgentExecutorBuilder = new AsyncExecutorContainer( + threadPool.executor(URGENT_FUTURE_COMPLETION), + threadPool.executor(URGENT_STREAM_READER), + new AsyncTransferEventLoopGroup(urgentEventLoopThreads) + ); this.priorityExecutorBuilder = new AsyncExecutorContainer( threadPool.executor(PRIORITY_FUTURE_COMPLETION), threadPool.executor(PRIORITY_STREAM_READER), @@ -176,7 +194,8 @@ protected S3Repository createRepository( AsyncTransferManager asyncUploadUtils = new AsyncTransferManager( S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.get(clusterService.getSettings()).getBytes(), normalExecutorBuilder.getStreamReader(), - priorityExecutorBuilder.getStreamReader() + priorityExecutorBuilder.getStreamReader(), + urgentExecutorBuilder.getStreamReader() ); return new S3Repository( metadata, @@ -185,6 +204,7 @@ protected S3Repository createRepository( clusterService, recoverySettings, asyncUploadUtils, + urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder, s3AsyncService, diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java index 6007d9f9c8a1c..933ee6dc29513 
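The plugin above sizes the new urgent pools from the allocated processor count, clamping (allocatedProcessors + 7) / 8 into [1, 2] via boundedBy. A worked sketch of that arithmetic, with boundedBy reimplemented locally since only its call sites appear in the diff:

    // Sketch of the thread-pool sizing used above; boundedBy is a local
    // reimplementation of the plugin's clamp helper.
    public final class PoolSizingSketch {
        static int boundedBy(int value, int min, int max) {
            return Math.min(max, Math.max(min, value)); // clamp value into [min, max]
        }

        // Urgent pool: roughly one thread per 8 allocated processors, clamped to 1..2.
        static int urgentPoolCount(int allocatedProcessors) {
            return boundedBy((allocatedProcessors + 7) / 8, 1, 2);
        }

        // Priority pool: roughly half the processors, clamped to 2..4.
        static int priorityPoolCount(int allocatedProcessors) {
            return boundedBy((allocatedProcessors + 1) / 2, 2, 4);
        }

        public static void main(String[] args) {
            for (int procs : new int[] { 1, 4, 8, 16, 32 }) {
                System.out.printf("procs=%d urgent=%d priority=%d%n",
                    procs, urgentPoolCount(procs), priorityPoolCount(procs));
            }
            // procs=1 gives urgent=1 priority=2; procs=16 gives urgent=2 priority=4
        }
    }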
100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java @@ -48,6 +48,7 @@ public class AsyncPartsHandler { * @param s3AsyncClient S3 client to use for upload * @param executorService Thread pool for regular upload * @param priorityExecutorService Thread pool for priority uploads + * @param urgentExecutorService Thread pool for urgent uploads * @param uploadRequest request for upload * @param streamContext Stream context used in supplying individual file parts * @param uploadId Upload Id against which multi-part is being performed @@ -60,6 +61,7 @@ public static List> uploadParts( S3AsyncClient s3AsyncClient, ExecutorService executorService, ExecutorService priorityExecutorService, + ExecutorService urgentExecutorService, UploadRequest uploadRequest, StreamContext streamContext, String uploadId, @@ -83,6 +85,7 @@ public static List> uploadParts( s3AsyncClient, executorService, priorityExecutorService, + urgentExecutorService, completedParts, inputStreamContainers, futures, @@ -129,6 +132,7 @@ private static void uploadPart( S3AsyncClient s3AsyncClient, ExecutorService executorService, ExecutorService priorityExecutorService, + ExecutorService urgentExecutorService, AtomicReferenceArray completedParts, AtomicReferenceArray inputStreamContainers, List> futures, @@ -138,9 +142,14 @@ private static void uploadPart( ) { Integer partNumber = uploadPartRequest.partNumber(); - ExecutorService streamReadExecutor = uploadRequest.getWritePriority() == WritePriority.HIGH - ? priorityExecutorService - : executorService; + ExecutorService streamReadExecutor; + if (uploadRequest.getWritePriority() == WritePriority.URGENT) { + streamReadExecutor = urgentExecutorService; + } else if (uploadRequest.getWritePriority() == WritePriority.HIGH) { + streamReadExecutor = priorityExecutorService; + } else { + streamReadExecutor = executorService; + } // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered // data can be retried instead of retrying whole file by the application. 
InputStream inputStream = new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java index a52745e33073e..4f1ab9764702e 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java @@ -61,6 +61,7 @@ public final class AsyncTransferManager { private static final Logger log = LogManager.getLogger(AsyncTransferManager.class); private final ExecutorService executorService; private final ExecutorService priorityExecutorService; + private final ExecutorService urgentExecutorService; private final long minimumPartSize; /** @@ -75,10 +76,16 @@ public final class AsyncTransferManager { * @param executorService The stream reader {@link ExecutorService} for normal priority uploads * @param priorityExecutorService The stream read {@link ExecutorService} for high priority uploads */ - public AsyncTransferManager(long minimumPartSize, ExecutorService executorService, ExecutorService priorityExecutorService) { + public AsyncTransferManager( + long minimumPartSize, + ExecutorService executorService, + ExecutorService priorityExecutorService, + ExecutorService urgentExecutorService + ) { this.executorService = executorService; this.priorityExecutorService = priorityExecutorService; this.minimumPartSize = minimumPartSize; + this.urgentExecutorService = urgentExecutorService; } /** @@ -162,6 +169,7 @@ private void doUploadInParts( s3AsyncClient, executorService, priorityExecutorService, + urgentExecutorService, uploadRequest, streamContext, uploadId, @@ -308,9 +316,14 @@ private void uploadInOneChunk( putObjectRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32); putObjectRequestBuilder.checksumCRC32(base64StringFromLong(uploadRequest.getExpectedChecksum())); } - ExecutorService streamReadExecutor = uploadRequest.getWritePriority() == WritePriority.HIGH - ? priorityExecutorService - : executorService; + ExecutorService streamReadExecutor; + if (uploadRequest.getWritePriority() == WritePriority.URGENT) { + streamReadExecutor = urgentExecutorService; + } else if (uploadRequest.getWritePriority() == WritePriority.HIGH) { + streamReadExecutor = priorityExecutorService; + } else { + streamReadExecutor = executorService; + } // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered // data can be retried instead of retrying whole file by the application. 
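The comment above carries the design rationale: wrapping the part stream in a BufferedInputStream (sized one byte over the part) makes mark()/reset() available, so a failed send can replay just the buffered bytes instead of re-reading the whole file. A standalone sketch of that replay mechanic, independent of the AWS SDK; the flaky consumer stands in for a failed network send:

    import java.io.BufferedInputStream;
    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    // Sketch of the mark/reset retry mechanic the comment above relies on.
    public final class MarkResetRetrySketch {
        public static void main(String[] args) throws IOException {
            byte[] part = "part payload".getBytes();
            InputStream in = new BufferedInputStream(new ByteArrayInputStream(part), part.length + 1);

            in.mark(part.length + 1); // remember the start of this part
            try {
                consumeAndFail(in);   // first attempt reads some bytes, then fails
            } catch (IOException e) {
                in.reset();           // rewind to the mark: only buffered data is replayed
            }
            System.out.println(new String(in.readAllBytes())); // retry sees the full part
        }

        private static void consumeAndFail(InputStream in) throws IOException {
            in.read(new byte[5]);     // partially consume the stream
            throw new IOException("simulated transfer failure");
        }
    }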
InputStream inputStream = new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java index a4bfe11383b4f..8e1926d40302f 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java @@ -302,7 +302,7 @@ protected S3Repository createRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, false) { + return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, null, false) { @Override protected void assertSnapshotOrGenericThread() { // eliminate thread name check as we create repo manually on test/main threads diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java index e9fe557ab751a..de9ad46bb222d 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java @@ -44,12 +44,12 @@ public void testCachedClientsAreReleased() { final S3ClientSettings otherClientSettings = s3AsyncService.settings(metadata2); assertSame(clientSettings, otherClientSettings); final AmazonAsyncS3Reference reference = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); reference.close(); s3AsyncService.close(); final AmazonAsyncS3Reference referenceReloaded = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); assertNotSame(referenceReloaded, reference); referenceReloaded.close(); @@ -79,12 +79,12 @@ public void testCachedClientsWithCredentialsAreReleased() { final S3ClientSettings otherClientSettings = s3AsyncService.settings(metadata2); assertSame(clientSettings, otherClientSettings); final AmazonAsyncS3Reference reference = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); reference.close(); s3AsyncService.close(); final AmazonAsyncS3Reference referenceReloaded = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); assertNotSame(referenceReloaded, reference); referenceReloaded.close(); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java index 6eb8faa746d34..7c67519f2f3b0 
100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java @@ -266,10 +266,11 @@ public void verifySingleChunkUploadCallCount(boolean finalizeUploadFailure) { @Override public AmazonAsyncS3Reference client( RepositoryMetadata repositoryMetadata, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { - return new AmazonAsyncS3Reference(AmazonAsyncS3WithCredentials.create(asyncClient, asyncClient, null)); + return new AmazonAsyncS3Reference(AmazonAsyncS3WithCredentials.create(asyncClient, asyncClient, asyncClient, null)); } } @@ -393,9 +394,11 @@ private S3BlobStore createBlobStore() { new AsyncTransferManager( S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(), asyncExecutorContainer.getStreamReader(), + asyncExecutorContainer.getStreamReader(), asyncExecutorContainer.getStreamReader() ), asyncExecutorContainer, + asyncExecutorContainer, asyncExecutorContainer ); } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java index a2214f5218991..ceab06bd051e9 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -221,9 +221,11 @@ protected AsyncMultiStreamBlobContainer createBlobContainer( new AsyncTransferManager( S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(), asyncExecutorContainer.getStreamReader(), + asyncExecutorContainer.getStreamReader(), asyncExecutorContainer.getStreamReader() ), asyncExecutorContainer, + asyncExecutorContainer, asyncExecutorContainer ) ) { diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index 2701cae6a733b..58ad290a31e85 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -935,7 +935,7 @@ public void testReadBlobAsyncMultiPart() throws Exception { final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( - AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null) ); final S3BlobStore blobStore = mock(S3BlobStore.class); @@ -993,7 +993,7 @@ public void testReadBlobAsyncSinglePart() throws Exception { final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( - AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null) ); final S3BlobStore blobStore = mock(S3BlobStore.class); final BlobPath blobPath = new BlobPath(); @@ -1048,7 +1048,7 @@ public void testReadBlobAsyncFailure() throws Exception { 
final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( - AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null) ); final S3BlobStore blobStore = mock(S3BlobStore.class); @@ -1091,7 +1091,7 @@ public void testReadBlobAsyncOnCompleteFailureMissingData() throws Exception { final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( - AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null) ); final S3BlobStore blobStore = mock(S3BlobStore.class); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java index e65ca69a5047b..6fec535ae6301 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java @@ -168,6 +168,7 @@ private S3Repository createS3Repo(RepositoryMetadata metadata) { null, null, null, + null, false ) { @Override diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java index 97a746cdeed93..2437547a80a6f 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java @@ -64,6 +64,7 @@ public void setUp() throws Exception { asyncTransferManager = new AsyncTransferManager( ByteSizeUnit.MB.toBytes(5), Executors.newSingleThreadExecutor(), + Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor() ); super.setUp(); diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java index b8c0b52f93a3c..3f341c878c3c7 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java @@ -15,5 +15,6 @@ */ public enum WritePriority { NORMAL, - HIGH + HIGH, + URGENT } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index ae4a3fab9852d..025ad075d83b6 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -378,7 +378,7 @@ private String writeGlobalMetadata(ClusterState clusterState) throws IOException result.set(globalMetadataContainer.path().buildAsString() + globalMetadataFilename); }, ex -> { throw new GlobalMetadataTransferException(ex.getMessage(), ex); }), latch); - GLOBAL_METADATA_FORMAT.writeAsync( + GLOBAL_METADATA_FORMAT.writeAsyncWithUrgentPriority( clusterState.metadata(), globalMetadataContainer, globalMetadataFilename, @@ -510,7 +510,7 @@ private void writeIndexMetadataAsync( ex -> 
latchedActionListener.onFailure(new IndexMetadataTransferException(indexMetadata.getIndex().toString(), ex)) ); - INDEX_METADATA_FORMAT.writeAsync( + INDEX_METADATA_FORMAT.writeAsyncWithUrgentPriority( indexMetadata, indexMetadataContainer, indexMetadataFilename, diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java index e280141c12bc1..3e6052a5ef820 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -197,21 +197,56 @@ public void write( } /** - * Writes blob with resolving the blob name using {@link #blobName} method. - * Leverages the multipart upload if supported by the blobContainer. + * Internally calls {@link #writeAsyncWithPriority} with {@link WritePriority#NORMAL} + */ + public void writeAsync( + final T obj, + final BlobContainer blobContainer, + final String name, + final Compressor compressor, + ActionListener listener, + final ToXContent.Params params + ) throws IOException { + // use NORMAL priority by default + this.writeAsyncWithPriority(obj, blobContainer, name, compressor, WritePriority.NORMAL, listener, params); + } + + /** + * Internally calls {@link #writeAsyncWithPriority} with {@link WritePriority#URGENT} + *
+ * NOTE: We use this method to upload urgent priority objects like cluster state to remote stores. + * Use {@link #writeAsync(ToXContent, BlobContainer, String, Compressor, ActionListener, ToXContent.Params)} for + * other use cases. + */ + public void writeAsyncWithUrgentPriority( + final T obj, + final BlobContainer blobContainer, + final String name, + final Compressor compressor, + ActionListener listener, + final ToXContent.Params params + ) throws IOException { + this.writeAsyncWithPriority(obj, blobContainer, name, compressor, WritePriority.URGENT, listener, params); + } + + /** + * Method to writes blob with resolving the blob name using {@link #blobName} method with specified + * {@link WritePriority}. Leverages the multipart upload if supported by the blobContainer. * * @param obj object to be serialized * @param blobContainer blob container * @param name blob name * @param compressor whether to use compression + * @param priority write priority to be used * @param listener listener to listen to write result * @param params ToXContent params */ - public void writeAsync( + private void writeAsyncWithPriority( final T obj, final BlobContainer blobContainer, final String name, final Compressor compressor, + final WritePriority priority, ActionListener listener, final ToXContent.Params params ) throws IOException { @@ -222,7 +257,7 @@ public void writeAsync( } final String blobName = blobName(name); final BytesReference bytes = serialize(obj, blobName, compressor, params); - final String resourceDescription = "ChecksumBlobStoreFormat.writeAsync(blob=\"" + blobName + "\")"; + final String resourceDescription = "ChecksumBlobStoreFormat.writeAsyncWithPriority(blob=\"" + blobName + "\")"; try (IndexInput input = new ByteArrayIndexInput(resourceDescription, BytesReference.toBytes(bytes))) { long expectedChecksum; try { @@ -242,7 +277,7 @@ public void writeAsync( blobName, bytes.length(), true, - WritePriority.HIGH, + priority, (size, position) -> new OffsetRangeIndexInputStream(input, size, position), expectedChecksum, ((AsyncMultiStreamBlobContainer) blobContainer).remoteIntegrityCheckSupported() diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 4be5fc03c2a6d..173e15b8eca37 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -273,7 +273,7 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { new BytesArray(writtenBytes) ); - assertEquals(capturedWriteContext.getWritePriority(), WritePriority.HIGH); + assertEquals(capturedWriteContext.getWritePriority(), WritePriority.URGENT); assertEquals(writtenIndexMetadata.getNumberOfShards(), 1); assertEquals(writtenIndexMetadata.getNumberOfReplicas(), 0); assertEquals(writtenIndexMetadata.getIndex().getName(), "test-index"); diff --git a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java index c114b56bd0b39..c5f36fcc01983 100644 --- a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java +++ b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java @@ -43,6 +43,7 @@ import org.opensearch.common.blobstore.fs.FsBlobStore; import org.opensearch.common.blobstore.stream.read.ReadContext; import 
org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.compress.DeflateCompressor; import org.opensearch.common.io.Streams; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -65,8 +66,13 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; +import org.mockito.ArgumentCaptor; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThan; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; public class BlobStoreFormatTests extends OpenSearchTestCase { @@ -128,44 +134,36 @@ public void testBlobStoreAsyncOperations() throws IOException, InterruptedExcept BlobPath.cleanPath(), null ); + MockFsVerifyingBlobContainer spyContainer = spy(mockBlobContainer); ChecksumBlobStoreFormat checksumSMILE = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent); - + ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + ArgumentCaptor writeContextArgumentCaptor = ArgumentCaptor.forClass(WriteContext.class); CountDownLatch latch = new CountDownLatch(2); - ActionListener actionListener = new ActionListener<>() { - @Override - public void onResponse(Void unused) { - logger.info("---> Async write succeeded"); - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - logger.info("---> Failure in async write"); - throw new RuntimeException("async write should not fail"); - } - }; - // Write blobs in different formats checksumSMILE.writeAsync( new BlobObj("checksum smile"), - mockBlobContainer, + spyContainer, "check-smile", CompressorRegistry.none(), - actionListener, + getVoidActionListener(latch), ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS ); checksumSMILE.writeAsync( new BlobObj("checksum smile compressed"), - mockBlobContainer, + spyContainer, "check-smile-comp", CompressorRegistry.getCompressor(DeflateCompressor.NAME), - actionListener, + getVoidActionListener(latch), ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS ); latch.await(); + verify(spyContainer, times(2)).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); + assertEquals(2, writeContextArgumentCaptor.getAllValues().size()); + writeContextArgumentCaptor.getAllValues() + .forEach(writeContext -> assertEquals(WritePriority.NORMAL, writeContext.getWritePriority())); // Assert that all checksum blobs can be read assertEquals(checksumSMILE.read(mockBlobContainer.getDelegate(), "check-smile", xContentRegistry()).getText(), "checksum smile"); assertEquals( @@ -174,6 +172,39 @@ public void onFailure(Exception e) { ); } + public void testBlobStorePriorityAsyncOperation() throws IOException, InterruptedException { + BlobStore blobStore = createTestBlobStore(); + MockFsVerifyingBlobContainer mockBlobContainer = new MockFsVerifyingBlobContainer( + (FsBlobStore) blobStore, + BlobPath.cleanPath(), + null + ); + MockFsVerifyingBlobContainer spyContainer = spy(mockBlobContainer); + ChecksumBlobStoreFormat checksumSMILE = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent); + + ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + ArgumentCaptor writeContextArgumentCaptor = ArgumentCaptor.forClass(WriteContext.class); + CountDownLatch latch = new CountDownLatch(1); + + // Write blobs in different formats + 
checksumSMILE.writeAsyncWithUrgentPriority( + new BlobObj("cluster state diff"), + spyContainer, + "cluster-state-diff", + CompressorRegistry.none(), + getVoidActionListener(latch), + ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS + ); + latch.await(); + + verify(spyContainer).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); + assertEquals(WritePriority.URGENT, writeContextArgumentCaptor.getValue().getWritePriority()); + assertEquals( + checksumSMILE.read(mockBlobContainer.getDelegate(), "cluster-state-diff", xContentRegistry()).getText(), + "cluster state diff" + ); + } + public void testBlobStoreOperations() throws IOException { BlobStore blobStore = createTestBlobStore(); BlobContainer blobContainer = blobStore.blobContainer(BlobPath.cleanPath()); @@ -228,6 +259,24 @@ public void testBlobCorruption() throws IOException { } } + private ActionListener getVoidActionListener(CountDownLatch latch) { + ActionListener actionListener = new ActionListener<>() { + @Override + public void onResponse(Void unused) { + logger.info("---> Async write succeeded"); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + logger.info("---> Failure in async write"); + throw new RuntimeException("async write should not fail"); + } + }; + + return actionListener; + } + protected BlobStore createTestBlobStore() throws IOException { return new FsBlobStore(randomIntBetween(1, 8) * 1024, createTempDir(), false); } From 54e74a84437238c6154b0a15d209a9a1ecbaa4bb Mon Sep 17 00:00:00 2001 From: Aman Khare <85096200+amkhar@users.noreply.github.com> Date: Tue, 24 Oct 2023 14:28:29 +0530 Subject: [PATCH 081/178] Add cluster state stats (#10670) * Add cluster state update stats along with remote upload stats around success/ failure, latency metric Signed-off-by: Aman Khare --- CHANGELOG.md | 1 + .../discovery/ClusterManagerDisruptionIT.java | 3 + .../remote/RemoteClusterStateServiceIT.java | 43 ++++++ .../coordination/CoordinationState.java | 6 + .../cluster/coordination/Coordinator.java | 12 +- .../coordination/InMemoryPersistedState.java | 5 + .../coordination/PersistedStateStats.java | 126 ++++++++++++++++++ .../cluster/service/ClusterStateStats.java | 120 +++++++++++++++++ .../cluster/service/MasterService.java | 23 +++- .../opensearch/discovery/DiscoveryStats.java | 21 ++- .../opensearch/gateway/GatewayMetaState.java | 13 ++ .../remote/RemoteClusterStateService.java | 20 ++- .../remote/RemotePersistenceStats.java | 37 +++++ .../cluster/node/stats/NodeStatsTests.java | 29 +++- .../cluster/service/MasterServiceTests.java | 3 + .../GatewayMetaStatePersistedStateTests.java | 22 +++ .../RemoteClusterStateServiceTests.java | 34 +++++ .../AbstractCoordinatorTestCase.java | 5 + 18 files changed, 511 insertions(+), 12 deletions(-) create mode 100644 server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java create mode 100644 server/src/main/java/org/opensearch/cluster/service/ClusterStateStats.java create mode 100644 server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b0d9720ad208..8c7e3ee151d64 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -96,6 +96,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) - 
[Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814)) - Added cluster setting cluster.restrict.index.replication_type to restrict setting of index setting replication type ([#10866](https://github.com/opensearch-project/OpenSearch/pull/10866)) +- Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) ### Dependencies - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java index 1463c45aa9b2f..79f6ba6dfa642 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -199,6 +200,8 @@ public void testIsolateClusterManagerAndVerifyClusterStateConsensus() throws Exc } } + ClusterStateStats clusterStateStats = internalCluster().clusterService().getClusterManagerService().getClusterStateStats(); + assertTrue(clusterStateStats.getUpdateFailed() > 0); }); } diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java index 7304304e522f8..59eef3c06844b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java @@ -8,9 +8,12 @@ package org.opensearch.gateway.remote; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Settings; +import org.opensearch.discovery.DiscoveryStats; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -19,6 +22,7 @@ import java.nio.charset.StandardCharsets; import java.util.Base64; import java.util.Map; +import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; @@ -94,6 +98,45 @@ public void testFullClusterRestoreStaleDelete() throws Exception { assertEquals(shardCount, indexMetadataMap.values().stream().findFirst().get().getNumberOfShards()); } + public void testRemoteStateStats() { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * 
(replicaCount + 1); + int clusterManagerNodeCount = 1; + prepareCluster(clusterManagerNodeCount, dataNodeCount, INDEX_NAME, replicaCount, shardCount); + String clusterManagerNode = internalCluster().getClusterManagerName(); + String dataNode = internalCluster().getDataNodeNames().stream().collect(Collectors.toList()).get(0); + + // Fetch _nodes/stats + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(clusterManagerNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + + // assert cluster state stats + DiscoveryStats discoveryStats = nodesStatsResponse.getNodes().get(0).getDiscoveryStats(); + + assertNotNull(discoveryStats.getClusterStateStats()); + assertTrue(discoveryStats.getClusterStateStats().getUpdateSuccess() > 1); + assertEquals(0, discoveryStats.getClusterStateStats().getUpdateFailed()); + assertTrue(discoveryStats.getClusterStateStats().getUpdateTotalTimeInMillis() > 0); + // assert remote state stats + assertTrue(discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getSuccessCount() > 1); + assertEquals(0, discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getFailedCount()); + assertTrue(discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getTotalTimeInMillis() > 0); + + NodesStatsResponse nodesStatsResponseDataNode = client().admin() + .cluster() + .prepareNodesStats(dataNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + // assert cluster state stats for data node + DiscoveryStats dataNodeDiscoveryStats = nodesStatsResponseDataNode.getNodes().get(0).getDiscoveryStats(); + assertNotNull(dataNodeDiscoveryStats.getClusterStateStats()); + assertEquals(0, dataNodeDiscoveryStats.getClusterStateStats().getUpdateSuccess()); + } + private void setReplicaCount(int replicaCount) { client().admin() .indices() diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index a339852e6ed8d..987a3e3ffa7d3 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -638,6 +638,12 @@ public interface PersistedState extends Closeable { */ void setLastAcceptedState(ClusterState clusterState); + /** + * Returns the stats for the persistence layer for {@link CoordinationState}. + * @return PersistedStateStats + */ + PersistedStateStats getStats(); + /** * Marks the last accepted cluster state as committed. 
* After a successful call to this method, {@link #getLastAcceptedState()} should return the last cluster state that was set, diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index eb30460ca1b7f..a4ffab7fb70c9 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -56,6 +56,7 @@ import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.cluster.service.ClusterApplier.ClusterApplyListener; import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; @@ -865,7 +866,16 @@ protected void doStart() { @Override public DiscoveryStats stats() { - return new DiscoveryStats(new PendingClusterStateStats(0, 0, 0), publicationHandler.stats()); + ClusterStateStats clusterStateStats = clusterManagerService.getClusterStateStats(); + ArrayList stats = new ArrayList<>(); + Stream.of(PersistedStateRegistry.PersistedStateType.values()).forEach(stateType -> { + if (persistedStateRegistry.getPersistedState(stateType) != null + && persistedStateRegistry.getPersistedState(stateType).getStats() != null) { + stats.add(persistedStateRegistry.getPersistedState(stateType).getStats()); + } + }); + clusterStateStats.setPersistenceStats(stats); + return new DiscoveryStats(new PendingClusterStateStats(0, 0, 0), publicationHandler.stats(), clusterStateStats); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java b/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java index 67ef82ee7b2e9..b77ede5471534 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java @@ -65,6 +65,11 @@ public void setLastAcceptedState(ClusterState clusterState) { this.acceptedState = clusterState; } + @Override + public PersistedStateStats getStats() { + return null; + } + @Override public long getCurrentTerm() { return currentTerm; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java new file mode 100644 index 0000000000000..1dc20e564ade2 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.coordination; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Persisted cluster state related stats. 
+ * + * @opensearch.internal + */ +public class PersistedStateStats implements Writeable, ToXContentObject { + private String statsName; + private AtomicLong totalTimeInMillis = new AtomicLong(0); + private AtomicLong failedCount = new AtomicLong(0); + private AtomicLong successCount = new AtomicLong(0); + private Map extendedFields = new HashMap<>(); // keeping minimal extensibility + + public PersistedStateStats(String statsName) { + this.statsName = statsName; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(successCount.get()); + out.writeVLong(failedCount.get()); + out.writeVLong(totalTimeInMillis.get()); + if (extendedFields.size() > 0) { + out.writeBoolean(true); + out.writeVInt(extendedFields.size()); + for (Map.Entry extendedField : extendedFields.entrySet()) { + out.writeString(extendedField.getKey()); + out.writeVLong(extendedField.getValue().get()); + } + } else { + out.writeBoolean(false); + } + } + + public PersistedStateStats(StreamInput in) throws IOException { + this.successCount = new AtomicLong(in.readVLong()); + this.failedCount = new AtomicLong(in.readVLong()); + this.totalTimeInMillis = new AtomicLong(in.readVLong()); + if (in.readBoolean()) { + int extendedFieldsSize = in.readVInt(); + this.extendedFields = new HashMap<>(); + for (int fieldNumber = 0; fieldNumber < extendedFieldsSize; fieldNumber++) { + extendedFields.put(in.readString(), new AtomicLong(in.readVLong())); + } + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(statsName); + builder.field(Fields.SUCCESS_COUNT, getSuccessCount()); + builder.field(Fields.FAILED_COUNT, getFailedCount()); + builder.field(Fields.TOTAL_TIME_IN_MILLIS, getTotalTimeInMillis()); + if (extendedFields.size() > 0) { + for (Map.Entry extendedField : extendedFields.entrySet()) { + builder.field(extendedField.getKey(), extendedField.getValue().get()); + } + } + builder.endObject(); + return builder; + } + + public void stateFailed() { + failedCount.incrementAndGet(); + } + + public void stateSucceeded() { + successCount.incrementAndGet(); + } + + /** + * Expects user to send time taken in milliseconds. 
+ * + * @param timeTakenInUpload time taken in uploading the cluster state to remote + */ + public void stateTook(long timeTakenInUpload) { + totalTimeInMillis.addAndGet(timeTakenInUpload); + } + + public long getTotalTimeInMillis() { + return totalTimeInMillis.get(); + } + + public long getFailedCount() { + return failedCount.get(); + } + + public long getSuccessCount() { + return successCount.get(); + } + + protected void addToExtendedFields(String extendedField, AtomicLong extendedFieldValue) { + this.extendedFields.put(extendedField, extendedFieldValue); + } + + /** + * Fields for parsing and toXContent + * + * @opensearch.internal + */ + static final class Fields { + static final String SUCCESS_COUNT = "success_count"; + static final String TOTAL_TIME_IN_MILLIS = "total_time_in_millis"; + static final String FAILED_COUNT = "failed_count"; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterStateStats.java b/server/src/main/java/org/opensearch/cluster/service/ClusterStateStats.java new file mode 100644 index 0000000000000..96683ce720d0b --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterStateStats.java @@ -0,0 +1,120 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.service; + +import org.opensearch.cluster.coordination.PersistedStateStats; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Cluster state related stats. 
+ * + * @opensearch.internal + */ +public class ClusterStateStats implements Writeable, ToXContentObject { + + private AtomicLong updateSuccess = new AtomicLong(0); + private AtomicLong updateTotalTimeInMillis = new AtomicLong(0); + private AtomicLong updateFailed = new AtomicLong(0); + private List persistenceStats = new ArrayList<>(); + + public ClusterStateStats() {} + + public long getUpdateSuccess() { + return updateSuccess.get(); + } + + public long getUpdateTotalTimeInMillis() { + return updateTotalTimeInMillis.get(); + } + + public long getUpdateFailed() { + return updateFailed.get(); + } + + public List getPersistenceStats() { + return persistenceStats; + } + + public void stateUpdated() { + updateSuccess.incrementAndGet(); + } + + public void stateUpdateFailed() { + updateFailed.incrementAndGet(); + } + + public void stateUpdateTook(long stateUpdateTime) { + updateTotalTimeInMillis.addAndGet(stateUpdateTime); + } + + public ClusterStateStats setPersistenceStats(List persistenceStats) { + this.persistenceStats = persistenceStats; + return this; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(updateSuccess.get()); + out.writeVLong(updateTotalTimeInMillis.get()); + out.writeVLong(updateFailed.get()); + out.writeVInt(persistenceStats.size()); + for (PersistedStateStats stats : persistenceStats) { + stats.writeTo(out); + } + } + + public ClusterStateStats(StreamInput in) throws IOException { + this.updateSuccess = new AtomicLong(in.readVLong()); + this.updateTotalTimeInMillis = new AtomicLong(in.readVLong()); + this.updateFailed = new AtomicLong(in.readVLong()); + int persistedStatsSize = in.readVInt(); + this.persistenceStats = new ArrayList<>(); + for (int statsNumber = 0; statsNumber < persistedStatsSize; statsNumber++) { + PersistedStateStats stats = new PersistedStateStats(in); + this.persistenceStats.add(stats); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.CLUSTER_STATE_STATS); + builder.startObject(Fields.OVERALL); + builder.field(Fields.UPDATE_COUNT, getUpdateSuccess()); + builder.field(Fields.TOTAL_TIME_IN_MILLIS, getUpdateTotalTimeInMillis()); + builder.field(Fields.FAILED_COUNT, getUpdateFailed()); + builder.endObject(); + for (PersistedStateStats stats : persistenceStats) { + stats.toXContent(builder, params); + } + builder.endObject(); + return builder; + } + + /** + * Fields for parsing and toXContent + * + * @opensearch.internal + */ + static final class Fields { + static final String CLUSTER_STATE_STATS = "cluster_state_stats"; + static final String OVERALL = "overall"; + static final String UPDATE_COUNT = "update_count"; + static final String TOTAL_TIME_IN_MILLIS = "total_time_in_millis"; + static final String FAILED_COUNT = "failed_count"; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index 563b69dfd0e2a..07c3f93ae6486 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -112,7 +112,9 @@ public class MasterService extends AbstractLifecycleComponent { static final String CLUSTER_MANAGER_UPDATE_THREAD_NAME = "clusterManagerService#updateTask"; - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #CLUSTER_MANAGER_UPDATE_THREAD_NAME} */ + /** + * @deprecated As of 
2.2, because supporting inclusive language, replaced by {@link #CLUSTER_MANAGER_UPDATE_THREAD_NAME} + */ @Deprecated static final String MASTER_UPDATE_THREAD_NAME = "masterService#updateTask"; @@ -130,6 +132,7 @@ public class MasterService extends AbstractLifecycleComponent { private volatile Batcher taskBatcher; protected final ClusterManagerTaskThrottler clusterManagerTaskThrottler; private final ClusterManagerThrottlingStats throttlingStats; + private final ClusterStateStats stateStats; public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { this.nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(settings)); @@ -147,6 +150,7 @@ public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadP this::getMinNodeVersion, throttlingStats ); + this.stateStats = new ClusterStateStats(); this.threadPool = threadPool; } @@ -339,7 +343,7 @@ private TimeValue getTimeSince(long startTimeNanos) { return TimeValue.timeValueMillis(TimeValue.nsecToMSec(threadPool.preciseRelativeTimeInNanos() - startTimeNanos)); } - protected void publish(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs, long startTimeMillis) { + protected void publish(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs, long startTimeNanos) { final PlainActionFuture fut = new PlainActionFuture() { @Override protected boolean blockingAllowed() { @@ -352,8 +356,12 @@ protected boolean blockingAllowed() { try { FutureUtils.get(fut); onPublicationSuccess(clusterChangedEvent, taskOutputs); + final long durationMillis = getTimeSince(startTimeNanos).millis(); + stateStats.stateUpdateTook(durationMillis); + stateStats.stateUpdated(); } catch (Exception e) { - onPublicationFailed(clusterChangedEvent, taskOutputs, startTimeMillis, e); + stateStats.stateUpdateFailed(); + onPublicationFailed(clusterChangedEvent, taskOutputs, startTimeNanos, e); } } @@ -464,7 +472,6 @@ public Builder incrementVersion(ClusterState clusterState) { * @param source the source of the cluster state update task * @param updateTask the full context for the cluster state update * task - * */ public & ClusterStateTaskListener> void submitStateUpdateTask( String source, @@ -490,7 +497,6 @@ public & Cluster * @param listener callback after the cluster state update task * completes * @param the type of the cluster state update task state - * */ public void submitStateUpdateTask( String source, @@ -947,7 +953,7 @@ void onNoLongerClusterManager() { /** * Functionality for register task key to cluster manager node. 
* - * @param taskKey - task key of task + * @param taskKey - task key of task * @param throttlingEnabled - throttling is enabled for task or not i.e does data node perform retries on it or not * @return throttling task key which needs to be passed while submitting task to cluster manager */ @@ -966,7 +972,6 @@ public ClusterManagerTaskThrottler.ThrottlingKey registerClusterManagerTask(Stri * that share the same executor will be executed * batches on this executor * @param the type of the cluster state update task state - * */ public void submitStateUpdateTasks( final String source, @@ -996,4 +1001,8 @@ public void submitStateUpdateTasks( } } + public ClusterStateStats getClusterStateStats() { + return stateStats; + } + } diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java b/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java index 665ecf77d7aa7..ea93ccd09ed39 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java @@ -32,8 +32,10 @@ package org.opensearch.discovery; +import org.opensearch.Version; import org.opensearch.cluster.coordination.PendingClusterStateStats; import org.opensearch.cluster.coordination.PublishClusterStateStats; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -51,21 +53,31 @@ public class DiscoveryStats implements Writeable, ToXContentFragment { private final PendingClusterStateStats queueStats; private final PublishClusterStateStats publishStats; + private final ClusterStateStats clusterStateStats; - public DiscoveryStats(PendingClusterStateStats queueStats, PublishClusterStateStats publishStats) { + public DiscoveryStats(PendingClusterStateStats queueStats, PublishClusterStateStats publishStats, ClusterStateStats clusterStateStats) { this.queueStats = queueStats; this.publishStats = publishStats; + this.clusterStateStats = clusterStateStats; } public DiscoveryStats(StreamInput in) throws IOException { queueStats = in.readOptionalWriteable(PendingClusterStateStats::new); publishStats = in.readOptionalWriteable(PublishClusterStateStats::new); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + clusterStateStats = in.readOptionalWriteable(ClusterStateStats::new); + } else { + clusterStateStats = null; + } } @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(queueStats); out.writeOptionalWriteable(publishStats); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalWriteable(clusterStateStats); + } } @Override @@ -77,6 +89,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (publishStats != null) { publishStats.toXContent(builder, params); } + if (clusterStateStats != null) { + clusterStateStats.toXContent(builder, params); + } builder.endObject(); return builder; } @@ -92,4 +107,8 @@ public PendingClusterStateStats getQueueStats() { public PublishClusterStateStats getPublishStats() { return publishStats; } + + public ClusterStateStats getClusterStateStats() { + return clusterStateStats; + } } diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index f855449c708d2..350a361a49a62 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ 
b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -47,6 +47,7 @@ import org.opensearch.cluster.coordination.InMemoryPersistedState; import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; +import org.opensearch.cluster.coordination.PersistedStateStats; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexTemplateMetadata; import org.opensearch.cluster.metadata.Manifest; @@ -615,6 +616,12 @@ public void setLastAcceptedState(ClusterState clusterState) { lastAcceptedState = clusterState; } + @Override + public PersistedStateStats getStats() { + // Note: These stats are not published yet, will come in future + return null; + } + private PersistedClusterStateService.Writer getWriterSafe() { final PersistedClusterStateService.Writer writer = persistenceWriter.get(); if (writer == null) { @@ -717,10 +724,16 @@ assert verifyManifestAndClusterState(lastAcceptedManifest, lastAcceptedState) == lastAcceptedManifest = manifest; lastAcceptedState = clusterState; } catch (Exception e) { + remoteClusterStateService.writeMetadataFailed(); handleExceptionOnWrite(e); } } + @Override + public PersistedStateStats getStats() { + return remoteClusterStateService.getStats(); + } + private boolean verifyManifestAndClusterState(ClusterMetadataManifest manifest, ClusterState clusterState) { assert manifest != null : "ClusterMetadataManifest is null"; assert clusterState != null : "ClusterState is null"; diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 025ad075d83b6..329ebd0dcd2b8 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -159,7 +159,7 @@ public class RemoteClusterStateService implements Closeable { private volatile TimeValue globalMetadataUploadTimeout; private final AtomicBoolean deleteStaleMetadataRunning = new AtomicBoolean(false); - + private final RemotePersistenceStats remoteStateStats; public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 1; public static final int MANIFEST_CURRENT_CODEC_VERSION = ClusterMetadataManifest.CODEC_V1; public static final int GLOBAL_METADATA_CURRENT_CODEC_VERSION = 1; @@ -193,6 +193,7 @@ public RemoteClusterStateService( clusterSettings.addSettingsUpdateConsumer(SLOW_WRITE_LOGGING_THRESHOLD, this::setSlowWriteLoggingThreshold); clusterSettings.addSettingsUpdateConsumer(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, this::setIndexMetadataUploadTimeout); clusterSettings.addSettingsUpdateConsumer(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, this::setGlobalMetadataUploadTimeout); + this.remoteStateStats = new RemotePersistenceStats(); } private BlobStoreTransferService getBlobStoreTransferService() { @@ -233,6 +234,8 @@ public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState, Stri false ); final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); + remoteStateStats.stateSucceeded(); + remoteStateStats.stateTook(durationMillis); if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { logger.warn( "writing cluster state took [{}ms] which is above the warn threshold of [{}]; " + "wrote full state with [{}] indices", @@ -334,6 +337,8 @@ public ClusterMetadataManifest 
writeIncrementalMetadata( deleteStaleClusterMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID(), RETAINED_MANIFESTS); final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); + remoteStateStats.stateSucceeded(); + remoteStateStats.stateTook(durationMillis); if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { logger.warn( "writing cluster state took [{}ms] which is above the warn threshold of [{}]; " @@ -1059,6 +1064,10 @@ public static String encodeString(String content) { return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8)); } + public void writeMetadataFailed() { + getStats().stateFailed(); + } + /** * Exception for IndexMetadata transfer failures to remote */ @@ -1093,7 +1102,7 @@ public GlobalMetadataTransferException(String errorDesc, Throwable cause) { * @param clusterName name of the cluster * @param clusterUUIDs clusteUUIDs for which the remote state needs to be purged */ - private void deleteStaleUUIDsClusterMetadata(String clusterName, List clusterUUIDs) { + void deleteStaleUUIDsClusterMetadata(String clusterName, List clusterUUIDs) { clusterUUIDs.forEach(clusterUUID -> { getBlobStoreTransferService().deleteAsync( ThreadPool.Names.REMOTE_PURGE, @@ -1113,6 +1122,7 @@ public void onFailure(Exception e) { ), e ); + remoteStateStats.cleanUpAttemptFailed(); } } ); @@ -1228,8 +1238,10 @@ private void deleteClusterMetadata( logger.error("Error while fetching Remote Cluster Metadata manifests", e); } catch (IOException e) { logger.error("Error while deleting stale Remote Cluster Metadata files", e); + remoteStateStats.cleanUpAttemptFailed(); } catch (Exception e) { logger.error("Unexpected error while deleting stale Remote Cluster Metadata files", e); + remoteStateStats.cleanUpAttemptFailed(); } } @@ -1260,4 +1272,8 @@ public void deleteStaleClusterUUIDs(ClusterState clusterState, ClusterMetadataMa deleteStaleUUIDsClusterMetadata(clusterName, new ArrayList<>(allClustersUUIDsInRemote)); }); } + + public RemotePersistenceStats getStats() { + return remoteStateStats; + } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java new file mode 100644 index 0000000000000..f2330846fa23e --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote; + +import org.opensearch.cluster.coordination.PersistedStateStats; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Remote state related extended stats. 
+ * + * @opensearch.internal + */ +public class RemotePersistenceStats extends PersistedStateStats { + static final String CLEANUP_ATTEMPT_FAILED_COUNT = "cleanup_attempt_failed_count"; + static final String REMOTE_UPLOAD = "remote_upload"; + private AtomicLong cleanupAttemptFailedCount = new AtomicLong(0); + + public RemotePersistenceStats() { + super(REMOTE_UPLOAD); + addToExtendedFields(CLEANUP_ATTEMPT_FAILED_COUNT, cleanupAttemptFailedCount); + } + + public void cleanUpAttemptFailed() { + cleanupAttemptFailedCount.incrementAndGet(); + } + + public long getCleanupAttemptFailedCount() { + return cleanupAttemptFailedCount.get(); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index ebdd012006fb2..3050d1674a95b 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -36,10 +36,12 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.search.SearchRequestStats; import org.opensearch.cluster.coordination.PendingClusterStateStats; +import org.opensearch.cluster.coordination.PersistedStateStats; import org.opensearch.cluster.coordination.PublishClusterStateStats; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.WeightedRoutingStats; import org.opensearch.cluster.service.ClusterManagerThrottlingStats; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.metrics.OperationStats; import org.opensearch.core.common.io.stream.StreamInput; @@ -47,6 +49,7 @@ import org.opensearch.core.indices.breaker.AllCircuitBreakerStats; import org.opensearch.core.indices.breaker.CircuitBreakerStats; import org.opensearch.discovery.DiscoveryStats; +import org.opensearch.gateway.remote.RemotePersistenceStats; import org.opensearch.http.HttpStats; import org.opensearch.index.ReplicationStats; import org.opensearch.index.SegmentReplicationRejectionStats; @@ -72,6 +75,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; @@ -349,6 +353,25 @@ public void testSerialization() throws IOException { assertEquals(queueStats.getTotal(), deserializedDiscoveryStats.getQueueStats().getTotal()); assertEquals(queueStats.getPending(), deserializedDiscoveryStats.getQueueStats().getPending()); } + ClusterStateStats stateStats = discoveryStats.getClusterStateStats(); + if (stateStats == null) { + assertNull(deserializedDiscoveryStats.getClusterStateStats()); + } else { + assertEquals(stateStats.getUpdateFailed(), deserializedDiscoveryStats.getClusterStateStats().getUpdateFailed()); + assertEquals(stateStats.getUpdateSuccess(), deserializedDiscoveryStats.getClusterStateStats().getUpdateSuccess()); + assertEquals( + stateStats.getUpdateTotalTimeInMillis(), + deserializedDiscoveryStats.getClusterStateStats().getUpdateTotalTimeInMillis() + ); + assertEquals(1, deserializedDiscoveryStats.getClusterStateStats().getPersistenceStats().size()); + PersistedStateStats deserializedRemoteStateStats = deserializedDiscoveryStats.getClusterStateStats() + .getPersistenceStats() + .get(0); + PersistedStateStats remoteStateStats = stateStats.getPersistenceStats().get(0); + 
assertEquals(remoteStateStats.getFailedCount(), deserializedRemoteStateStats.getFailedCount()); + assertEquals(remoteStateStats.getSuccessCount(), deserializedRemoteStateStats.getSuccessCount()); + assertEquals(remoteStateStats.getTotalTimeInMillis(), deserializedRemoteStateStats.getTotalTimeInMillis()); + } } IngestStats ingestStats = nodeStats.getIngestStats(); IngestStats deserializedIngestStats = deserializedNodeStats.getIngestStats(); @@ -725,12 +748,16 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { ScriptStats scriptStats = frequently() ? new ScriptStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()) : null; + ClusterStateStats stateStats = new ClusterStateStats(); + RemotePersistenceStats remoteStateStats = new RemotePersistenceStats(); + stateStats.setPersistenceStats(Arrays.asList(remoteStateStats)); DiscoveryStats discoveryStats = frequently() ? new DiscoveryStats( randomBoolean() ? new PendingClusterStateStats(randomInt(), randomInt(), randomInt()) : null, randomBoolean() ? new PublishClusterStateStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()) - : null + : null, + randomBoolean() ? stateStats : null ) : null; IngestStats ingestStats = null; diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java index 9cdbe04e0a0e4..4c0ca826f5dcc 100644 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java @@ -691,6 +691,9 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS submittedTasksPerThread.get(entry.getKey()).get() ); } + // verify stats values after state is published + assertEquals(1, clusterManagerService.getClusterStateStats().getUpdateSuccess()); + assertEquals(0, clusterManagerService.getClusterStateStats().getUpdateFailed()); } } diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java index 1d5c2a0f01b5c..fd113ed4313d7 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -68,6 +68,7 @@ import org.opensearch.gateway.PersistedClusterStateService.Writer; import org.opensearch.gateway.remote.ClusterMetadataManifest; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.gateway.remote.RemotePersistenceStats; import org.opensearch.index.recovery.RemoteStoreRestoreService; import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult; import org.opensearch.node.Node; @@ -104,6 +105,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.doCallRealMethod; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -779,6 +781,26 @@ public void testRemotePersistedStateExceptionOnFullStateUpload() throws IOExcept assertThrows(OpenSearchException.class, () -> remotePersistedState.setLastAcceptedState(clusterState)); } + public void testRemotePersistedStateFailureStats() throws IOException { + RemotePersistenceStats remoteStateStats = new 
RemotePersistenceStats(); + final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); + final String previousClusterUUID = "prev-cluster-uuid"; + Mockito.doThrow(IOException.class).when(remoteClusterStateService).writeFullMetadata(Mockito.any(), Mockito.any()); + when(remoteClusterStateService.getStats()).thenReturn(remoteStateStats); + doCallRealMethod().when(remoteClusterStateService).writeMetadataFailed(); + CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService, previousClusterUUID); + + final long clusterTerm = randomNonNegativeLong(); + final ClusterState clusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + ); + + assertThrows(OpenSearchException.class, () -> remotePersistedState.setLastAcceptedState(clusterState)); + assertEquals(1, remoteClusterStateService.getStats().getFailedCount()); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); + } + public void testGatewayForRemoteState() throws IOException { MockGatewayMetaState gateway = null; try { diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 173e15b8eca37..5a43864f40c0c 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -324,6 +324,7 @@ public void testWriteFullMetadataInParallelFailureForIndexMetadata() throws IOEx RemoteClusterStateService.IndexMetadataTransferException.class, () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) ); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); } public void testFailWriteIncrementalMetadataNonClusterManagerNode() throws IOException { @@ -331,6 +332,7 @@ public void testFailWriteIncrementalMetadataNonClusterManagerNode() throws IOExc remoteClusterStateService.start(); final ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata(clusterState, clusterState, null); Assert.assertThat(manifest, nullValue()); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); } public void testFailWriteIncrementalMetadataWhenTermChanged() { @@ -991,6 +993,38 @@ public void testDeleteStaleClusterUUIDs() throws IOException { } } + public void testRemoteStateStats() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + mockBlobStoreObjects(); + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid"); + + assertTrue(remoteClusterStateService.getStats() != null); + assertEquals(1, remoteClusterStateService.getStats().getSuccessCount()); + assertEquals(0, remoteClusterStateService.getStats().getCleanupAttemptFailedCount()); + assertEquals(0, remoteClusterStateService.getStats().getFailedCount()); + } + + public void testRemoteStateCleanupFailureStats() throws IOException { + BlobContainer blobContainer = mock(BlobContainer.class); + doThrow(IOException.class).when(blobContainer).delete(); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + BlobPath blobPath = new 
BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + remoteClusterStateService.start(); + remoteClusterStateService.deleteStaleUUIDsClusterMetadata("cluster1", Arrays.asList("cluster-uuid1")); + try { + assertBusy(() -> { + // wait for stats to get updated + assertTrue(remoteClusterStateService.getStats() != null); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); + assertEquals(1, remoteClusterStateService.getStats().getCleanupAttemptFailedCount()); + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + public void testFileNames() { final Index index = new Index("test-index", "index-uuid"); final Settings idxSettings = Settings.builder() diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index d24cc24d28579..28d7706fb1493 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -1016,6 +1016,11 @@ public void setLastAcceptedState(ClusterState clusterState) { delegate.setLastAcceptedState(clusterState); } + @Override + public PersistedStateStats getStats() { + return null; + } + @Override public void close() { assertTrue(openPersistedStates.remove(this)); From 6f36752d9e84e95ce2280347cc26b0c9138b2d57 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Tue, 24 Oct 2023 14:31:01 +0530 Subject: [PATCH 082/178] Sync translog to remote on primary activate (#10839) --------- Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale --- .../remotestore/RemoteRestoreSnapshotIT.java | 94 +++++++++++++++++++ .../remotestore/RemoteStoreStatsIT.java | 8 +- .../opensearch/index/shard/IndexShard.java | 19 +++- .../index/shard/IndexShardTests.java | 1 + 4 files changed, 116 insertions(+), 6 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 865b2d13f189e..9e0b2a66467de 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -11,6 +11,7 @@ import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.delete.DeleteResponse; @@ -20,8 +21,13 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.index.Index; import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; 
import org.opensearch.snapshots.SnapshotInfo; @@ -32,11 +38,15 @@ import org.junit.Before; import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; @@ -345,6 +355,90 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { assertDocsPresentInIndex(client, indexName1, numDocsInIndex1 + 4); } + public void testRemoteRestoreIndexRestoredFromSnapshot() throws IOException, ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + + String indexName1 = "testindex1"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); + + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName1, indexSettings); + + final int numDocsInIndex1 = randomIntBetween(20, 30); + indexDocuments(client(), indexName1, numDocsInIndex1); + flushAndRefresh(indexName1); + ensureGreen(indexName1); + + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo1 = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(Arrays.asList(indexName1))); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); + + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(indexName1)).get()); + assertFalse(indexExists(indexName1)); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndices(indexName1) + .get(); + + assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); + ensureGreen(indexName1); + assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); + + // Make sure remote translog is empty + String indexUUID = client().admin() + .indices() + .prepareGetSettings(indexName1) + .get() + .getSetting(indexName1, IndexMetadata.SETTING_INDEX_UUID); + + Path remoteTranslogMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/metadata"); + Path remoteTranslogDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/data"); + + try ( + Stream translogMetadata = Files.list(remoteTranslogMetadataPath); + Stream translogData = Files.list(remoteTranslogDataPath) + ) { + assertTrue(translogData.count() > 0); + assertTrue(translogMetadata.count() > 0); + } + + // Clear the local data before stopping the node. This will make sure that remote translog is empty. 
+ IndexShard indexShard = getIndexShard(primaryNodeName(indexName1), indexName1); + try (Stream files = Files.list(indexShard.shardPath().resolveTranslog())) { + IOUtils.deleteFilesIgnoringExceptions(files.collect(Collectors.toList())); + } + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(indexName1))); + + ensureRed(indexName1); + + client().admin() + .cluster() + .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indexName1).restoreAllShards(false), PlainActionFuture.newFuture()); + + ensureGreen(indexName1); + assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); + } + + protected IndexShard getIndexShard(String node, String indexName) { + final Index index = resolveIndex(indexName); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); + IndexService indexService = indicesService.indexService(index); + assertNotNull(indexService); + final Optional shardId = indexService.shardIds().stream().findFirst(); + return shardId.map(indexService::getShard).orElse(null); + } + public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException { String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); String primary = internalCluster().startDataOnlyNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 5e91176ed0473..b1dbb0a900bc7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -581,21 +581,23 @@ public void testNonZeroPrimaryStatsOnNewlyCreatedIndexWithZeroDocs() throws Exce .getRemoteStoreStats(); Arrays.stream(remoteStoreStats).forEach(statObject -> { RemoteSegmentTransferTracker.Stats segmentStats = statObject.getSegmentStats(); + RemoteTranslogTransferTracker.Stats translogStats = statObject.getTranslogStats(); if (statObject.getShardRouting().primary()) { assertTrue( segmentStats.totalUploadsSucceeded == 1 && segmentStats.totalUploadsStarted == segmentStats.totalUploadsSucceeded && segmentStats.totalUploadsFailed == 0 ); + // On primary shard creation, we upload to remote translog post primary mode activation. + // This changes upload stats to non-zero for primary shard. + assertNonZeroTranslogUploadStatsNoFailures(translogStats); } else { assertTrue( segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted == 0 && segmentStats.directoryFileTransferTrackerStats.transferredBytesSucceeded == 0 ); + assertZeroTranslogUploadStats(translogStats); } - - RemoteTranslogTransferTracker.Stats translogStats = statObject.getTranslogStats(); - assertZeroTranslogUploadStats(translogStats); assertZeroTranslogDownloadStats(translogStats); }); }, 5, TimeUnit.SECONDS); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index fb4e9056153aa..3c348035ebbdd 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -640,7 +640,7 @@ public void updateShardState( if (currentRouting.initializing() && currentRouting.isRelocationTarget() == false && newRouting.active()) { // the cluster-manager started a recovering primary, activate primary mode. 
replicationTracker.activatePrimaryMode(getLocalCheckpoint()); - ensurePeerRecoveryRetentionLeasesExist(); + postActivatePrimaryMode(); } } else { assert currentRouting.primary() == false : "term is only increased as part of primary promotion"; @@ -711,8 +711,7 @@ public void updateShardState( // are brought up to date. checkpointPublisher.publish(this, getLatestReplicationCheckpoint()); } - - ensurePeerRecoveryRetentionLeasesExist(); + postActivatePrimaryMode(); /* * If this shard was serving as a replica shard when another shard was promoted to primary then * its Lucene index was reset during the primary term transition. In particular, the Lucene index @@ -3393,6 +3392,20 @@ assert getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingE synchronized (mutex) { replicationTracker.activateWithPrimaryContext(primaryContext); // make changes to primaryMode flag only under mutex } + postActivatePrimaryMode(); + } + + private void postActivatePrimaryMode() { + if (indexSettings.isRemoteStoreEnabled()) { + // We make sure to upload translog (even if it does not contain any operations) to remote translog. + // This helps to get a consistent state in remote store where both remote segment store and remote + // translog contains data. + try { + getEngine().translogManager().syncTranslog(); + } catch (IOException e) { + logger.error("Failed to sync translog to remote from new primary", e); + } + } ensurePeerRecoveryRetentionLeasesExist(); } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 9ef9bec01cb38..fa3cf7676f55c 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -2745,6 +2745,7 @@ public void testRelocatedForRemoteTranslogBackedIndexWithAsyncDurability() throw AllocationId.newRelocation(routing.allocationId()) ); IndexShardTestCase.updateRoutingEntry(indexShard, routing); + indexDoc(indexShard, "_doc", "0"); assertTrue(indexShard.isSyncNeeded()); try { indexShard.relocated(routing.getTargetRelocatingShard().allocationId().getId(), primaryContext -> {}, () -> {}); From 5bd413c588f48589c6fd6c4de4e87550271aecf8 Mon Sep 17 00:00:00 2001 From: Peter Nied Date: Tue, 24 Oct 2023 07:38:18 -0400 Subject: [PATCH 083/178] GHA to verify checklist items completion in PR descriptions (#10800) Signed-off-by: Peter Nied --- .github/workflows/pull-request-checks.yml | 28 +++++++++++++++++++++++ CHANGELOG.md | 1 + 2 files changed, 29 insertions(+) create mode 100644 .github/workflows/pull-request-checks.yml diff --git a/.github/workflows/pull-request-checks.yml b/.github/workflows/pull-request-checks.yml new file mode 100644 index 0000000000000..11998e36c2dbb --- /dev/null +++ b/.github/workflows/pull-request-checks.yml @@ -0,0 +1,28 @@ +name: Pull Request Checks + +on: + pull_request: + types: + [ + opened, + edited, + review_requested, + synchronize, + reopened, + ready_for_review, + ] + +jobs: + verify-description-checklist: + name: Verify Description Checklist + runs-on: ubuntu-latest + steps: + - uses: peternied/check-pull-request-description-checklist@v1 + with: + checklist-items: | + New functionality includes testing. + All tests pass + New functionality has been documented. 
+ New functionality has javadoc added + Commits are signed per the DCO using --signoff + Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8c7e3ee151d64..b40878066960a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535)) - [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) - [AdmissionControl] Added changes for AdmissionControl Interceptor and AdmissionControlService for RateLimiting ([#9286](https://github.com/opensearch-project/OpenSearch/pull/9286)) +- GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 From 91ac0846dbeb7379ae0772bf9f144cd628e9deac Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Wed, 25 Oct 2023 14:01:27 +0530 Subject: [PATCH 084/178] link previous cluster uuid to current cluster uuid even if current cluster uuid is not committed (#10832) * link previous cluster uuid to current cluster uuid even if current cluster uuid is not committed Signed-off-by: bansvaru --- .../opensearch/gateway/GatewayMetaState.java | 29 +++++++------- .../GatewayMetaStatePersistedStateTests.java | 39 +++++++++++++++++++ 2 files changed, 52 insertions(+), 16 deletions(-) diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index 350a361a49a62..c3056276706a0 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -695,24 +695,21 @@ public void setLastAcceptedState(ClusterState clusterState) { try { final ClusterMetadataManifest manifest; if (shouldWriteFullClusterState(clusterState)) { - if (clusterState.metadata().clusterUUIDCommitted() == true) { - final Optional latestManifest = remoteClusterStateService.getLatestClusterMetadataManifest( - clusterState.getClusterName().value(), + final Optional latestManifest = remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + if (latestManifest.isPresent()) { + // The previous UUID should not change for the current UUID. So fetching the latest manifest + // from remote store and getting the previous UUID. + previousClusterUUID = latestManifest.get().getPreviousClusterUUID(); + } else { + // When the user starts the cluster with remote state disabled but later enables the remote state, + // there will not be any manifest for the current cluster UUID. + logger.error( + "Latest manifest is not present in remote store for cluster UUID: {}", clusterState.metadata().clusterUUID() ); - if (latestManifest.isPresent()) { - // The previous UUID should not change for the current UUID. So fetching the latest manifest - // from remote store and getting the previous UUID. 
- previousClusterUUID = latestManifest.get().getPreviousClusterUUID(); - } else { - // When the user starts the cluster with remote state disabled but later enables the remote state, - // there will not be any manifest for the current cluster UUID. - logger.error( - "Latest manifest is not present in remote store for cluster UUID: {}", - clusterState.metadata().clusterUUID() - ); - previousClusterUUID = ClusterState.UNKNOWN_UUID; - } } manifest = remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID); } else { diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java index fd113ed4313d7..74bae7b5eb7cf 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -87,10 +87,12 @@ import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; +import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; @@ -765,6 +767,43 @@ public void testRemotePersistedState() throws IOException { assertThat(remotePersistedState.getLastAcceptedState().metadata().clusterUUIDCommitted(), equalTo(true)); } + public void testRemotePersistedStateNotCommitted() throws IOException { + final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); + final String previousClusterUUID = "prev-cluster-uuid"; + final ClusterMetadataManifest manifest = ClusterMetadataManifest.builder() + .previousClusterUUID(previousClusterUUID) + .clusterTerm(1L) + .stateVersion(5L) + .build(); + Mockito.when(remoteClusterStateService.getLatestClusterMetadataManifest(Mockito.any(), Mockito.any())) + .thenReturn(Optional.of(manifest)); + Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any())).thenReturn(manifest); + + Mockito.when(remoteClusterStateService.writeIncrementalMetadata(Mockito.any(), Mockito.any(), Mockito.any())).thenReturn(manifest); + CoordinationState.PersistedState remotePersistedState = new RemotePersistedState( + remoteClusterStateService, + ClusterState.UNKNOWN_UUID + ); + + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + assertThat(remotePersistedState.getCurrentTerm(), equalTo(0L)); + + final long clusterTerm = randomNonNegativeLong(); + ClusterState clusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + ); + clusterState = ClusterState.builder(clusterState) + .metadata(Metadata.builder(clusterState.getMetadata()).clusterUUID(randomAlphaOfLength(10)).clusterUUIDCommitted(false).build()) + .build(); + + remotePersistedState.setLastAcceptedState(clusterState); + ArgumentCaptor previousClusterUUIDCaptor = ArgumentCaptor.forClass(String.class); + ArgumentCaptor clusterStateCaptor = ArgumentCaptor.forClass(ClusterState.class); + Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterStateCaptor.capture(), previousClusterUUIDCaptor.capture()); + assertEquals(previousClusterUUID, previousClusterUUIDCaptor.getValue()); + } + public void 
testRemotePersistedStateExceptionOnFullStateUpload() throws IOException { final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); final String previousClusterUUID = "prev-cluster-uuid"; From b5299f13e0ca9a5f6979e8cb50137682e777b095 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Wed, 25 Oct 2023 14:46:44 +0530 Subject: [PATCH 085/178] Delete corrupted file to re-download from remote store (#10891) --------- Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale --- .../opensearch/index/shard/IndexShard.java | 5 +- .../index/shard/IndexShardTests.java | 52 +++++++++++++++++++ .../org/opensearch/test/CorruptionUtils.java | 2 +- 3 files changed, 57 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 3c348035ebbdd..5b6257084e440 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -4962,7 +4962,8 @@ private String copySegmentFiles( return segmentNFile; } - private boolean localDirectoryContains(Directory localDirectory, String file, long checksum) { + // Visible for testing + boolean localDirectoryContains(Directory localDirectory, String file, long checksum) throws IOException { try (IndexInput indexInput = localDirectory.openInput(file, IOContext.DEFAULT)) { if (checksum == CodecUtil.retrieveChecksum(indexInput)) { return true; @@ -4981,6 +4982,8 @@ private boolean localDirectoryContains(Directory localDirectory, String file, lo logger.debug("File {} does not exist in local FS, downloading from remote store", file); } catch (IOException e) { logger.warn("Exception while reading checksum of file: {}, this can happen if file is corrupted", file); + // For any other exception on reading checksum, we delete the file to re-download again + localDirectory.deleteFile(file); } return false; } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index fa3cf7676f55c..f5f8cd1dcfb3f 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -32,6 +32,7 @@ package org.opensearch.index.shard; import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; @@ -45,6 +46,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; import org.apache.lucene.tests.mockfile.ExtrasFS; import org.apache.lucene.tests.store.BaseDirectoryWrapper; import org.apache.lucene.util.BytesRef; @@ -91,6 +93,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.util.FileSystemUtils; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -163,11 +166,13 @@ import org.junit.Assert; import java.io.IOException; +import java.nio.channels.FileChannel; import java.nio.charset.Charset; import java.nio.file.FileVisitResult; 
import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; +import java.nio.file.StandardOpenOption; import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.Arrays; @@ -4907,6 +4912,53 @@ public void testRecordsForceMerges() throws IOException { closeShards(shard); } + public void testLocalDirectoryContains() throws IOException { + IndexShard indexShard = newStartedShard(true); + int numDocs = between(1, 10); + for (int i = 0; i < numDocs; i++) { + indexDoc(indexShard, "_doc", Integer.toString(i)); + } + flushShard(indexShard); + indexShard.store().incRef(); + Directory localDirectory = indexShard.store().directory(); + Path shardPath = indexShard.shardPath().getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + Path tempDir = createTempDir(); + for (String file : localDirectory.listAll()) { + if (file.equals("write.lock") || file.startsWith("extra")) { + continue; + } + boolean corrupted = randomBoolean(); + long checksum = 0; + try (IndexInput indexInput = localDirectory.openInput(file, IOContext.DEFAULT)) { + checksum = CodecUtil.retrieveChecksum(indexInput); + } + if (corrupted) { + Files.copy(shardPath.resolve(file), tempDir.resolve(file)); + try (FileChannel raf = FileChannel.open(shardPath.resolve(file), StandardOpenOption.READ, StandardOpenOption.WRITE)) { + CorruptionUtils.corruptAt(shardPath.resolve(file), raf, (int) (raf.size() - 8)); + } + } + if (corrupted == false) { + assertTrue(indexShard.localDirectoryContains(localDirectory, file, checksum)); + } else { + assertFalse(indexShard.localDirectoryContains(localDirectory, file, checksum)); + assertFalse(Files.exists(shardPath.resolve(file))); + } + } + try (Stream files = Files.list(tempDir)) { + files.forEach(p -> { + try { + Files.copy(p, shardPath.resolve(p.getFileName())); + } catch (IOException e) { + // Ignore + } + }); + } + FileSystemUtils.deleteSubDirectories(tempDir); + indexShard.store().decRef(); + closeShards(indexShard); + } + private void populateSampleRemoteSegmentStats(RemoteSegmentTransferTracker tracker) { tracker.addUploadBytesStarted(30L); tracker.addUploadBytesSucceeded(10L); diff --git a/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java b/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java index 0dce5e78bf91f..67522bb618cf1 100644 --- a/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java @@ -121,7 +121,7 @@ public static void corruptFile(Random random, Path... 
files) throws IOException } } - static void corruptAt(Path path, FileChannel channel, int position) throws IOException { + public static void corruptAt(Path path, FileChannel channel, int position) throws IOException { // read channel.position(position); long filePointer = channel.position(); From a890e518aea1a706249001c3420c1740bd1a06dd Mon Sep 17 00:00:00 2001 From: Dhwanil Patel Date: Wed, 25 Oct 2023 20:05:07 +0530 Subject: [PATCH 086/178] Fix exception handling for global metadata upload (#10889) Signed-off-by: Dhwanil Patel --- .../gateway/remote/RemoteClusterStateService.java | 8 ++++++-- .../gateway/remote/RemoteClusterStateServiceTests.java | 8 +++++++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 329ebd0dcd2b8..57b1b972e08c0 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -369,6 +369,8 @@ public ClusterMetadataManifest writeIncrementalMetadata( private String writeGlobalMetadata(ClusterState clusterState) throws IOException { AtomicReference result = new AtomicReference(); + AtomicReference exceptionReference = new AtomicReference(); + final BlobContainer globalMetadataContainer = globalMetadataContainer( clusterState.getClusterName().value(), clusterState.metadata().clusterUUID() @@ -381,7 +383,7 @@ private String writeGlobalMetadata(ClusterState clusterState) throws IOException LatchedActionListener completionListener = new LatchedActionListener<>(ActionListener.wrap(resp -> { logger.trace(String.format(Locale.ROOT, "GlobalMetadata uploaded successfully.")); result.set(globalMetadataContainer.path().buildAsString() + globalMetadataFilename); - }, ex -> { throw new GlobalMetadataTransferException(ex.getMessage(), ex); }), latch); + }, ex -> { exceptionReference.set(ex); }), latch); GLOBAL_METADATA_FORMAT.writeAsyncWithUrgentPriority( clusterState.metadata(), @@ -408,7 +410,9 @@ private String writeGlobalMetadata(ClusterState clusterState) throws IOException Thread.currentThread().interrupt(); throw exception; } - + if (exceptionReference.get() != null) { + throw new GlobalMetadataTransferException(exceptionReference.get().getMessage(), exceptionReference.get()); + } return result.get(); } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 5a43864f40c0c..ca88653f529f6 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -294,7 +294,13 @@ public void testWriteFullMetadataFailureForGlobalMetadata() throws IOException { ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); doAnswer((i) -> { - actionListenerArgumentCaptor.getValue().onFailure(new RuntimeException("Cannot upload to remote")); + // For async write action listener will be called from different thread, replicating same behaviour here. 
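+            // Running onFailure on a separate thread mimics the real async upload path and
+            // exercises the latch-based wait in writeGlobalMetadata before the captured
+            // exception is rethrown as GlobalMetadataTransferException.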
+ new Thread(new Runnable() { + @Override + public void run() { + actionListenerArgumentCaptor.getValue().onFailure(new RuntimeException("Cannot upload to remote")); + } + }).start(); return null; }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); From 3e64a7b92192a37546b92b811e04c4155dafbdfd Mon Sep 17 00:00:00 2001 From: Aman Khare <85096200+amkhar@users.noreply.github.com> Date: Wed, 25 Oct 2023 21:13:52 +0530 Subject: [PATCH 087/178] Change version to 2.12.0 for cluster state stats (#10915) Signed-off-by: Aman Khare Co-authored-by: Aman Khare --- .../main/java/org/opensearch/discovery/DiscoveryStats.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java b/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java index ea93ccd09ed39..fb341ac2ac569 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java @@ -64,7 +64,7 @@ public DiscoveryStats(PendingClusterStateStats queueStats, PublishClusterStateSt public DiscoveryStats(StreamInput in) throws IOException { queueStats = in.readOptionalWriteable(PendingClusterStateStats::new); publishStats = in.readOptionalWriteable(PublishClusterStateStats::new); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { clusterStateStats = in.readOptionalWriteable(ClusterStateStats::new); } else { clusterStateStats = null; @@ -75,7 +75,7 @@ public DiscoveryStats(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(queueStats); out.writeOptionalWriteable(publishStats); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(clusterStateStats); } } From 6779633d76052585f5e20df5ac9388e14d75ffb2 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Wed, 25 Oct 2023 09:58:16 -0700 Subject: [PATCH 088/178] Fix flaky test testSendCorruptBytesToReplica (#10897) --- .../SegmentReplicationDisruptionIT.java | 167 ++++++++++++++++++ .../replication/SegmentReplicationIT.java | 136 -------------- 2 files changed, 167 insertions(+), 136 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java new file mode 100644 index 0000000000000..66b26b5d25cfe --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication; + +import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.action.admin.indices.recovery.RecoveryResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.recovery.FileChunkRequest; +import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportService; +import org.junit.Before; + +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; + +/** + * These tests simulate corruption cases during replication. They are skipped on WindowsFS simulation where file renaming + * can fail with an access denied IOException because deletion is not permitted. + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +@LuceneTestCase.SuppressFileSystems("WindowsFS") +public class SegmentReplicationDisruptionIT extends SegmentReplicationBaseIT { + @Before + private void setup() { + internalCluster().startClusterManagerOnlyNode(); + } + + public void testSendCorruptBytesToReplica() throws Exception { + final String primaryNode = internalCluster().startDataOnlyNode(); + createIndex( + INDEX_NAME, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.refresh_interval", -1) + .build() + ); + ensureYellow(INDEX_NAME); + final String replicaNode = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + MockTransportService primaryTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + primaryNode + )); + CountDownLatch latch = new CountDownLatch(1); + AtomicBoolean failed = new AtomicBoolean(false); + primaryTransportService.addSendBehavior( + internalCluster().getInstance(TransportService.class, replicaNode), + (connection, requestId, action, request, options) -> { + if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK) && failed.getAndSet(true) == false) { + FileChunkRequest req = (FileChunkRequest) request; + TransportRequest corrupt = new FileChunkRequest( + req.recoveryId(), + ((FileChunkRequest) request).requestSeqNo(), + ((FileChunkRequest) request).shardId(), + ((FileChunkRequest) request).metadata(), + ((FileChunkRequest) request).position(), + new BytesArray("test"), + false, + 0, + 0L + ); + connection.sendRequest(requestId, action, corrupt, options); + latch.countDown(); + } else { + connection.sendRequest(requestId, action, request, options); + } + } + ); + for (int i = 0; i < 100; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + final long originalRecoveryTime = getRecoveryStopTime(replicaNode); + assertNotEquals(originalRecoveryTime, 0); + refresh(INDEX_NAME); + latch.await(); + assertTrue(failed.get()); + waitForNewPeerRecovery(replicaNode, originalRecoveryTime); + // reset checkIndex to ensure our original shard doesn't throw + 
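+        // (the deliberately corrupted chunk can flag the store as corrupted, which would
+        // otherwise fail the test framework's end-of-test index check)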
resetCheckIndexStatus(); + waitForSearchableDocs(100, primaryNode, replicaNode); + } + + public void testWipeSegmentBetweenSyncs() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final String primaryNode = internalCluster().startDataOnlyNode(); + createIndex( + INDEX_NAME, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.refresh_interval", -1) + .build() + ); + ensureYellow(INDEX_NAME); + final String replicaNode = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + for (int i = 0; i < 10; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + refresh(INDEX_NAME); + ensureGreen(INDEX_NAME); + final long originalRecoveryTime = getRecoveryStopTime(replicaNode); + + final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); + waitForSearchableDocs(INDEX_NAME, 10, List.of(replicaNode)); + indexShard.store().directory().deleteFile("_0.si"); + + for (int i = 11; i < 21; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + refresh(INDEX_NAME); + waitForNewPeerRecovery(replicaNode, originalRecoveryTime); + resetCheckIndexStatus(); + waitForSearchableDocs(20, primaryNode, replicaNode); + } + + private void waitForNewPeerRecovery(String replicaNode, long originalRecoveryTime) throws Exception { + assertBusy(() -> { + // assert we have a peer recovery after the original + final long time = getRecoveryStopTime(replicaNode); + assertNotEquals(time, 0); + assertNotEquals(originalRecoveryTime, time); + + }, 1, TimeUnit.MINUTES); + } + + private long getRecoveryStopTime(String nodeName) { + final RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries(INDEX_NAME).get(); + final List recoveryStates = recoveryResponse.shardRecoveryStates().get(INDEX_NAME); + for (RecoveryState recoveryState : recoveryStates) { + if (recoveryState.getTargetNode().getName().equals(nodeName)) { + return recoveryState.getTimer().stopTime(); + } + } + return 0L; + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 81556cc270151..f48df082a25dc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -24,7 +24,6 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.flush.FlushRequest; -import org.opensearch.action.admin.indices.recovery.RecoveryResponse; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.action.get.GetResponse; @@ -59,7 +58,6 @@ import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.index.shard.ShardId; import 
org.opensearch.core.xcontent.XContentBuilder; @@ -73,7 +71,6 @@ import org.opensearch.index.engine.NRTReplicationReaderManager; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.recovery.FileChunkRequest; -import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.NodeClosedException; @@ -85,7 +82,6 @@ import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; -import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.junit.Before; @@ -98,7 +94,6 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import static java.util.Arrays.asList; @@ -1781,135 +1776,4 @@ public void testRealtimeTermVectorRequestsUnSuccessful() throws IOException { assertThat(response.getIndex(), equalTo(INDEX_NAME)); } - - public void testSendCorruptBytesToReplica() throws Exception { - // this test stubs transport calls specific to node-node replication. - assumeFalse( - "Skipping the test as its not compatible with segment replication with remote store.", - segmentReplicationWithRemoteEnabled() - ); - final String primaryNode = internalCluster().startDataOnlyNode(); - createIndex( - INDEX_NAME, - Settings.builder() - .put(indexSettings()) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put("index.refresh_interval", -1) - .build() - ); - ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startDataOnlyNode(); - ensureGreen(INDEX_NAME); - - MockTransportService primaryTransportService = ((MockTransportService) internalCluster().getInstance( - TransportService.class, - primaryNode - )); - CountDownLatch latch = new CountDownLatch(1); - AtomicBoolean failed = new AtomicBoolean(false); - primaryTransportService.addSendBehavior( - internalCluster().getInstance(TransportService.class, replicaNode), - (connection, requestId, action, request, options) -> { - if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK) && failed.getAndSet(true) == false) { - FileChunkRequest req = (FileChunkRequest) request; - logger.info("SENDING CORRUPT file chunk [{}] lastChunk: {}", req, req.lastChunk()); - TransportRequest corrupt = new FileChunkRequest( - req.recoveryId(), - ((FileChunkRequest) request).requestSeqNo(), - ((FileChunkRequest) request).shardId(), - ((FileChunkRequest) request).metadata(), - ((FileChunkRequest) request).position(), - new BytesArray("test"), - false, - 0, - 0L - ); - connection.sendRequest(requestId, action, corrupt, options); - latch.countDown(); - } else { - connection.sendRequest(requestId, action, request, options); - } - } - ); - for (int i = 0; i < 100; i++) { - client().prepareIndex(INDEX_NAME) - .setId(String.valueOf(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .get(); - } - final long originalRecoveryTime = getRecoveryStopTime(replicaNode); - assertNotEquals(originalRecoveryTime, 0); - refresh(INDEX_NAME); - latch.await(); - assertTrue(failed.get()); - waitForNewPeerRecovery(replicaNode, originalRecoveryTime); - // reset checkIndex to ensure our original shard doesn't throw - resetCheckIndexStatus(); - 
waitForSearchableDocs(100, primaryNode, replicaNode); - } - - public void testWipeSegmentBetweenSyncs() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - final String primaryNode = internalCluster().startDataOnlyNode(); - createIndex( - INDEX_NAME, - Settings.builder() - .put(indexSettings()) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put("index.refresh_interval", -1) - .build() - ); - ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startDataOnlyNode(); - ensureGreen(INDEX_NAME); - - for (int i = 0; i < 10; i++) { - client().prepareIndex(INDEX_NAME) - .setId(String.valueOf(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .get(); - } - refresh(INDEX_NAME); - ensureGreen(INDEX_NAME); - final long originalRecoveryTime = getRecoveryStopTime(replicaNode); - - final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); - waitForSearchableDocs(INDEX_NAME, 10, List.of(replicaNode)); - indexShard.store().directory().deleteFile("_0.si"); - - for (int i = 11; i < 21; i++) { - client().prepareIndex(INDEX_NAME) - .setId(String.valueOf(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .get(); - } - refresh(INDEX_NAME); - waitForNewPeerRecovery(replicaNode, originalRecoveryTime); - resetCheckIndexStatus(); - waitForSearchableDocs(20, primaryNode, replicaNode); - } - - private void waitForNewPeerRecovery(String replicaNode, long originalRecoveryTime) throws Exception { - assertBusy(() -> { - // assert we have a peer recovery after the original - final long time = getRecoveryStopTime(replicaNode); - assertNotEquals(time, 0); - assertNotEquals(originalRecoveryTime, time); - - }, 1, TimeUnit.MINUTES); - } - - private long getRecoveryStopTime(String nodeName) { - final RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries(INDEX_NAME).get(); - final List recoveryStates = recoveryResponse.shardRecoveryStates().get(INDEX_NAME); - logger.info("Recovery states {}", recoveryResponse); - for (RecoveryState recoveryState : recoveryStates) { - if (recoveryState.getTargetNode().getName().equals(nodeName)) { - return recoveryState.getTimer().stopTime(); - } - } - return 0L; - } } From 44a9f180a5643e3b4dee1150c5a68d1591270ced Mon Sep 17 00:00:00 2001 From: Aman Khare <85096200+amkhar@users.noreply.github.com> Date: Thu, 26 Oct 2023 00:21:12 +0530 Subject: [PATCH 089/178] Fix flaky testClusterStateBatchedUpdates test (#10922) Signed-off-by: Aman Khare Co-authored-by: Aman Khare --- .../org/opensearch/cluster/service/MasterServiceTests.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java index 4c0ca826f5dcc..85f6c129944fa 100644 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java @@ -487,6 +487,9 @@ public void onFailure(String source, Exception e) { } }); assertBusy(mockAppender::assertAllExpectationsMatched); + // verify stats values after state is published + assertEquals(1, clusterManagerService.getClusterStateStats().getUpdateSuccess()); + assertEquals(0, clusterManagerService.getClusterStateStats().getUpdateFailed()); } } } @@ -691,9 +694,6 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS 
submittedTasksPerThread.get(entry.getKey()).get() ); } - // verify stats values after state is published - assertEquals(1, clusterManagerService.getClusterStateStats().getUpdateSuccess()); - assertEquals(0, clusterManagerService.getClusterStateStats().getUpdateFailed()); } } From fb6fe1bf4518a4a4a8507564fbd4db1971f29151 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Wed, 25 Oct 2023 14:20:48 -0700 Subject: [PATCH 090/178] Fix flaky test IndexShardTests.testLocalDirectoryContains (#10929) This test is breaking for WindowsFS only. Moving it to a separate file where it is skipped on WindowsFS. Signed-off-by: Marc Handalian --- .../index/shard/IndexShardTests.java | 52 ------------- .../RemoteIndexShardCorruptionTests.java | 75 +++++++++++++++++++ 2 files changed, 75 insertions(+), 52 deletions(-) create mode 100644 server/src/test/java/org/opensearch/index/shard/RemoteIndexShardCorruptionTests.java diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index f5f8cd1dcfb3f..fa3cf7676f55c 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -32,7 +32,6 @@ package org.opensearch.index.shard; import org.apache.logging.log4j.Logger; -import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; @@ -46,7 +45,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexInput; import org.apache.lucene.tests.mockfile.ExtrasFS; import org.apache.lucene.tests.store.BaseDirectoryWrapper; import org.apache.lucene.util.BytesRef; @@ -93,7 +91,6 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; -import org.opensearch.core.util.FileSystemUtils; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -166,13 +163,11 @@ import org.junit.Assert; import java.io.IOException; -import java.nio.channels.FileChannel; import java.nio.charset.Charset; import java.nio.file.FileVisitResult; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; -import java.nio.file.StandardOpenOption; import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.Arrays; @@ -4912,53 +4907,6 @@ public void testRecordsForceMerges() throws IOException { closeShards(shard); } - public void testLocalDirectoryContains() throws IOException { - IndexShard indexShard = newStartedShard(true); - int numDocs = between(1, 10); - for (int i = 0; i < numDocs; i++) { - indexDoc(indexShard, "_doc", Integer.toString(i)); - } - flushShard(indexShard); - indexShard.store().incRef(); - Directory localDirectory = indexShard.store().directory(); - Path shardPath = indexShard.shardPath().getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); - Path tempDir = createTempDir(); - for (String file : localDirectory.listAll()) { - if (file.equals("write.lock") || file.startsWith("extra")) { - continue; - } - boolean corrupted = randomBoolean(); - long checksum = 0; - try (IndexInput indexInput = 
localDirectory.openInput(file, IOContext.DEFAULT)) { - checksum = CodecUtil.retrieveChecksum(indexInput); - } - if (corrupted) { - Files.copy(shardPath.resolve(file), tempDir.resolve(file)); - try (FileChannel raf = FileChannel.open(shardPath.resolve(file), StandardOpenOption.READ, StandardOpenOption.WRITE)) { - CorruptionUtils.corruptAt(shardPath.resolve(file), raf, (int) (raf.size() - 8)); - } - } - if (corrupted == false) { - assertTrue(indexShard.localDirectoryContains(localDirectory, file, checksum)); - } else { - assertFalse(indexShard.localDirectoryContains(localDirectory, file, checksum)); - assertFalse(Files.exists(shardPath.resolve(file))); - } - } - try (Stream files = Files.list(tempDir)) { - files.forEach(p -> { - try { - Files.copy(p, shardPath.resolve(p.getFileName())); - } catch (IOException e) { - // Ignore - } - }); - } - FileSystemUtils.deleteSubDirectories(tempDir); - indexShard.store().decRef(); - closeShards(indexShard); - } - private void populateSampleRemoteSegmentStats(RemoteSegmentTransferTracker tracker) { tracker.addUploadBytesStarted(30L); tracker.addUploadBytesSucceeded(10L); diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardCorruptionTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardCorruptionTests.java new file mode 100644 index 0000000000000..21bf580712761 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardCorruptionTests.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.test.CorruptionUtils; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.stream.Stream; + +@LuceneTestCase.SuppressFileSystems("WindowsFS") +public class RemoteIndexShardCorruptionTests extends IndexShardTestCase { + + public void testLocalDirectoryContains() throws IOException { + IndexShard indexShard = newStartedShard(true); + int numDocs = between(1, 10); + for (int i = 0; i < numDocs; i++) { + indexDoc(indexShard, "_doc", Integer.toString(i)); + } + flushShard(indexShard); + indexShard.store().incRef(); + Directory localDirectory = indexShard.store().directory(); + Path shardPath = indexShard.shardPath().getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + Path tempDir = createTempDir(); + for (String file : localDirectory.listAll()) { + if (file.equals("write.lock") || file.startsWith("extra")) { + continue; + } + boolean corrupted = randomBoolean(); + long checksum = 0; + try (IndexInput indexInput = localDirectory.openInput(file, IOContext.DEFAULT)) { + checksum = CodecUtil.retrieveChecksum(indexInput); + } + if (corrupted) { + Files.copy(shardPath.resolve(file), tempDir.resolve(file)); + try (FileChannel raf = FileChannel.open(shardPath.resolve(file), StandardOpenOption.READ, StandardOpenOption.WRITE)) { + CorruptionUtils.corruptAt(shardPath.resolve(file), raf, (int) (raf.size() - 8)); + } + } + if (corrupted == false) { + 
assertTrue(indexShard.localDirectoryContains(localDirectory, file, checksum)); + } else { + assertFalse(indexShard.localDirectoryContains(localDirectory, file, checksum)); + assertFalse(Files.exists(shardPath.resolve(file))); + } + } + try (Stream files = Files.list(tempDir)) { + files.forEach(p -> { + try { + Files.copy(p, shardPath.resolve(p.getFileName())); + } catch (IOException e) { + // Ignore + } + }); + } + FileSystemUtils.deleteSubDirectories(tempDir); + indexShard.store().decRef(); + closeShards(indexShard); + } +} From b17d4a8d6fa2463adbf5fc7fbaa1b1bcc4424121 Mon Sep 17 00:00:00 2001 From: Ashish Date: Thu, 26 Oct 2023 06:20:41 +0530 Subject: [PATCH 091/178] [Remote Store] Fix refresh lag bug on primary term change (#10918) * [Remote Store] Fix refresh lag bug on primary term change Signed-off-by: Ashish Singh * Add Integ Tests Signed-off-by: Ashish Singh * Incorporate PR review feedback Signed-off-by: Ashish Singh * Empty-Commit Signed-off-by: Ashish Singh --------- Signed-off-by: Ashish Singh --- .../remotestore/RemoteStoreStatsIT.java | 23 ++++++++++++++++ .../shard/RemoteStoreRefreshListener.java | 27 ++++++++++++------- 2 files changed, 41 insertions(+), 9 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index b1dbb0a900bc7..2d3ab135d0377 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -655,6 +655,29 @@ public void testStatsCorrectnessOnFailover() { logger.info("Test completed"); } + public void testZeroLagOnCreateIndex() throws InterruptedException { + setup(); + String clusterManagerNode = internalCluster().getClusterManagerName(); + + int numOfShards = randomIntBetween(1, 3); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, numOfShards)); + ensureGreen(INDEX_NAME); + long currentTimeNs = System.nanoTime(); + while (currentTimeNs == System.nanoTime()) { + Thread.sleep(10); + } + + for (int i = 0; i < numOfShards; i++) { + RemoteStoreStatsResponse response = client(clusterManagerNode).admin() + .cluster() + .prepareRemoteStoreStats(INDEX_NAME, String.valueOf(i)) + .get(); + for (RemoteStoreStats remoteStoreStats : response.getRemoteStoreStats()) { + assertEquals(0, remoteStoreStats.getSegmentStats().refreshTimeLagMs); + } + } + } + private void indexDocs() { for (int i = 0; i < randomIntBetween(5, 10); i++) { if (randomBoolean()) { diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 3e97b07abfb5d..464adc88ae16f 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -86,7 +86,7 @@ public final class RemoteStoreRefreshListener extends CloseableRetryableRefreshL private final RemoteSegmentStoreDirectory remoteDirectory; private final RemoteSegmentTransferTracker segmentTracker; private final Map localSegmentChecksumMap; - private long primaryTerm; + private volatile long primaryTerm; private volatile Iterator backoffDelayIterator; private final SegmentReplicationCheckpointPublisher checkpointPublisher; @@ -126,10 +126,9 @@ protected void runAfterRefreshExactlyOnce(boolean didRefresh) { // We have 2 separate methods to check 
if sync needs to be done or not. This is required since we use the return boolean // from isReadyForUpload to schedule refresh retries as the index shard or the primary mode are not in complete // ready state. - if (shouldSync(didRefresh) && isReadyForUpload()) { - segmentTracker.updateLocalRefreshTimeAndSeqNo(); + if (shouldSync(didRefresh, true) && isReadyForUpload()) { try { - initializeRemoteDirectoryOnTermUpdate(); + segmentTracker.updateLocalRefreshTimeAndSeqNo(); try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { Collection localSegmentsPostRefresh = segmentInfosGatedCloseable.get().files(true); updateLocalSizeMapAndTracker(localSegmentsPostRefresh); @@ -150,7 +149,7 @@ protected void runAfterRefreshExactlyOnce(boolean didRefresh) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { boolean successful; - if (shouldSync(didRefresh)) { + if (shouldSync(didRefresh, false)) { successful = syncSegments(); } else { successful = true; @@ -158,10 +157,15 @@ protected boolean performAfterRefreshWithPermit(boolean didRefresh) { return successful; } - private boolean shouldSync(boolean didRefresh) { - return this.primaryTerm != indexShard.getOperationPrimaryTerm() - // If the readers change, didRefresh is always true. - || didRefresh + /** + * This checks if there is a sync required to remote. + * + * @param didRefresh if the readers changed. + * @param skipPrimaryTermCheck consider change in primary term or not for should sync + * @return true if sync is needed + */ + private boolean shouldSync(boolean didRefresh, boolean skipPrimaryTermCheck) { + boolean shouldSync = didRefresh // If the readers change, didRefresh is always true. // The third condition exists for uploading the zero state segments where the refresh has not changed the reader // reference, but it is important to upload the zero state segments so that the restore does not break. || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty() @@ -169,6 +173,10 @@ private boolean shouldSync(boolean didRefresh) { // we update the primary term and the same condition would not evaluate to true again in syncSegments. // Below check ensures that if there is commit, then that gets picked up by both 1st and 2nd shouldSync call. || isRefreshAfterCommitSafe(); + if (shouldSync || skipPrimaryTermCheck) { + return shouldSync; + } + return this.primaryTerm != indexShard.getOperationPrimaryTerm(); } private boolean syncSegments() { @@ -188,6 +196,7 @@ private boolean syncSegments() { try { try { + initializeRemoteDirectoryOnTermUpdate(); // if a new segments_N file is present in local that is not uploaded to remote store yet, it // is considered as a first refresh post commit. A cleanup of stale commit files is triggered. // This is done to avoid delete post each refresh. 
From 003b2cf30103282b031f4462ed53488edb60b9a4 Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Wed, 25 Oct 2023 18:22:19 -0700 Subject: [PATCH 092/178] [Segment Replication] Handle exceptions on local file read during replication (#10933) * Handle exceptions on file read Signed-off-by: Suraj Singh * Address review comments Signed-off-by: Suraj Singh --------- Signed-off-by: Suraj Singh --- .../replication/SegmentReplicationTarget.java | 11 +- .../index/shard/RemoteIndexShardTests.java | 159 ++++++++++++++---- 2 files changed, 134 insertions(+), 36 deletions(-) diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index cd6dbe8af90d9..cc71ef816e525 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -232,6 +232,7 @@ private List getFiles(CheckpointInfoResponse checkpointInfo) return missingFiles; } + // pkg private for tests private boolean validateLocalChecksum(StoreFileMetadata file) { try (IndexInput indexInput = indexShard.store().directory().openInput(file.name(), IOContext.DEFAULT)) { String checksum = Store.digestToString(CodecUtil.retrieveChecksum(indexInput)); @@ -243,7 +244,15 @@ private boolean validateLocalChecksum(StoreFileMetadata file) { return false; } } catch (IOException e) { - throw new UncheckedIOException("Error reading " + file, e); + logger.warn("Error reading " + file, e); + // Delete file on exceptions so that it can be re-downloaded. This is safe to do as this file is local only + // and not referenced by reader. + try { + indexShard.store().directory().deleteFile(file.name()); + } catch (IOException ex) { + throw new UncheckedIOException("Error reading " + file, e); + } + return false; } } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java index 703a7d457d5b6..2ce0bdc607189 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -31,18 +31,20 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.CorruptionUtils; import org.hamcrest.MatcherAssert; import org.junit.Assert; import java.io.IOException; +import java.nio.channels.FileChannel; import java.nio.file.Path; +import java.nio.file.StandardOpenOption; import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; import java.util.stream.Collectors; @@ -371,37 +373,9 @@ public void testSegRepSucceedsOnPreviousCopiedFiles() throws Exception { final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); - Runnable[] runAfterGetFiles = { () -> { throw new RuntimeException("Simulated"); }, () -> {} }; - AtomicInteger index = new AtomicInteger(0); - RemoteStoreReplicationSource testRSReplicationSource = new 
RemoteStoreReplicationSource(replica) { - @Override - public void getCheckpointMetadata( - long replicationId, - ReplicationCheckpoint checkpoint, - ActionListener listener - ) { - super.getCheckpointMetadata(replicationId, checkpoint, listener); - } - - @Override - public void getSegmentFiles( - long replicationId, - ReplicationCheckpoint checkpoint, - List filesToFetch, - IndexShard indexShard, - BiConsumer fileProgressTracker, - ActionListener listener - ) { - super.getSegmentFiles(replicationId, checkpoint, filesToFetch, indexShard, (fileName, bytesRecovered) -> {}, listener); - runAfterGetFiles[index.getAndIncrement()].run(); - } - - @Override - public String getDescription() { - return "TestRemoteStoreReplicationSource"; - } - }; - when(sourceFactory.get(any())).thenReturn(testRSReplicationSource); + when(sourceFactory.get(any())).thenReturn( + getRemoteStoreReplicationSource(replica, () -> { throw new RuntimeException("Simulated"); }) + ); CountDownLatch latch = new CountDownLatch(1); // Start first round of segment replication. This should fail with simulated error but with replica having @@ -412,6 +386,7 @@ public String getDescription() { new SegmentReplicationTargetService.SegmentReplicationListener() { @Override public void onReplicationDone(SegmentReplicationState state) { + latch.countDown(); Assert.fail("Replication should fail with simulated error"); } @@ -421,9 +396,9 @@ public void onReplicationFailure( ReplicationFailedException e, boolean sendShardFailure ) { + latch.countDown(); assertFalse(sendShardFailure); logger.error("Replication error", e); - latch.countDown(); } } ); @@ -439,7 +414,8 @@ public void onReplicationFailure( assertEquals("Files should be copied to disk", false, onDiskFiles.isEmpty()); assertEquals(target.state().getStage(), SegmentReplicationState.Stage.GET_FILES); - // Start next round of segment replication + // Start next round of segment replication and not throwing exception resulting in commit on replica + when(sourceFactory.get(any())).thenReturn(getRemoteStoreReplicationSource(replica, () -> {})); CountDownLatch waitForSecondRound = new CountDownLatch(1); final SegmentReplicationTarget newTarget = targetService.startReplication( replica, @@ -456,9 +432,9 @@ public void onReplicationFailure( ReplicationFailedException e, boolean sendShardFailure ) { + waitForSecondRound.countDown(); logger.error("Replication error", e); Assert.fail("Replication should not fail"); - waitForSecondRound.countDown(); } } ); @@ -471,6 +447,119 @@ public void onReplicationFailure( } } + /** + * This test validates that local non-readable (corrupt, partially) on disk are deleted vs failing the + * replication event. This test mimics local files (not referenced by reader) by throwing exception post file copy and + * blocking update of reader. Once this is done, it corrupts one segment file and ensure that file is deleted in next + * round of segment replication by ensuring doc count. 
+ */ + public void testNoFailuresOnFileReads() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int docCount = 10; + shards.indexDocs(docCount); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + when(sourceFactory.get(any())).thenReturn( + getRemoteStoreReplicationSource(replica, () -> { throw new RuntimeException("Simulated"); }) + ); + CountDownLatch waitOnReplicationCompletion = new CountDownLatch(1); + + // Start first round of segment replication. This should fail with simulated error but with replica having + // files in its local store but not in active reader. + SegmentReplicationTarget segmentReplicationTarget = targetService.startReplication( + replica, + primary.getLatestReplicationCheckpoint(), + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + waitOnReplicationCompletion.countDown(); + Assert.fail("Replication should fail with simulated error"); + } + + @Override + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { + waitOnReplicationCompletion.countDown(); + assertFalse(sendShardFailure); + } + } + ); + waitOnReplicationCompletion.await(); + assertBusy(() -> { assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); }); + String fileToCorrupt = null; + // Corrupt one data file + Path shardPath = replica.shardPath().getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + for (String file : replica.store().directory().listAll()) { + if (file.equals("write.lock") || file.startsWith("extra") || file.startsWith("segment")) { + continue; + } + fileToCorrupt = file; + logger.info("--> Corrupting file {}", fileToCorrupt); + try (FileChannel raf = FileChannel.open(shardPath.resolve(file), StandardOpenOption.READ, StandardOpenOption.WRITE)) { + CorruptionUtils.corruptAt(shardPath.resolve(file), raf, (int) (raf.size() - 8)); + } + break; + } + Assert.assertNotNull(fileToCorrupt); + + // Ingest more data and start next round of segment replication + shards.indexDocs(docCount); + primary.refresh("Post corruption"); + replicateSegments(primary, List.of(replica)); + + assertDocCount(primary, 2 * docCount); + assertDocCount(replica, 2 * docCount); + + final Store.RecoveryDiff diff = Store.segmentReplicationDiff(primary.getSegmentMetadataMap(), replica.getSegmentMetadataMap()); + assertTrue(diff.missing.isEmpty()); + assertTrue(diff.different.isEmpty()); + + // clean up + shards.removeReplica(replica); + closeShards(replica); + } + } + + private RemoteStoreReplicationSource getRemoteStoreReplicationSource(IndexShard shard, Runnable postGetFilesRunnable) { + return new RemoteStoreReplicationSource(shard) { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + super.getCheckpointMetadata(replicationId, checkpoint, listener); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + IndexShard indexShard, + BiConsumer fileProgressTracker, + ActionListener listener + ) { 
+ super.getSegmentFiles(replicationId, checkpoint, filesToFetch, indexShard, (fileName, bytesRecovered) -> {}, listener); + postGetFilesRunnable.run(); + } + + @Override + public String getDescription() { + return "TestRemoteStoreReplicationSource"; + } + }; + } + @Override protected void validateShardIdleWithNoReplicas(IndexShard primary) { // ensure search idle conditions are met. From fe8b2d545da09e3d80f74cd57a7ae991e1005abe Mon Sep 17 00:00:00 2001 From: Ashish Date: Thu, 26 Oct 2023 07:23:18 +0530 Subject: [PATCH 093/178] [Remote Store] Fix shard failure on flush due to upload timeout (#10926) --- .../transfer/TranslogTransferManager.java | 10 +- .../TranslogTransferManagerTests.java | 94 +++++++++++++++++++ 2 files changed, 100 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index ece6f6d5a534f..2f6055df87804 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -42,7 +42,6 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; import static org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; @@ -156,14 +155,17 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans try { if (latch.await(TRANSFER_TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS) == false) { - Exception ex = new TimeoutException("Timed out waiting for transfer of snapshot " + transferSnapshot + " to complete"); + Exception ex = new TranslogUploadFailedException( + "Timed out waiting for transfer of snapshot " + transferSnapshot + " to complete" + ); exceptionList.forEach(ex::addSuppressed); throw ex; } } catch (InterruptedException ex) { - exceptionList.forEach(ex::addSuppressed); + Exception exception = new TranslogUploadFailedException("Failed to upload " + transferSnapshot, ex); + exceptionList.forEach(exception::addSuppressed); Thread.currentThread().interrupt(); - throw ex; + throw exception; } if (exceptionList.isEmpty()) { TransferFileSnapshot tlogMetadata = prepareMetadata(transferSnapshot); diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index af596e7df02c2..e34bc078896f9 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.action.LatchedActionListener; +import org.opensearch.common.SetOnce; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; @@ -35,6 +36,7 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.LinkedList; @@ -180,6 +182,93 @@ public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { assertEquals(4, fileTransferTracker.allUploaded().size()); } + 
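For context on the TranslogTransferManager hunk above: both a latch timeout and a thread interrupt now surface as TranslogUploadFailedException, with the individual upload failures attached as suppressed exceptions, instead of a bare TimeoutException or a rethrown InterruptedException. The flush path therefore sees one ordinary "translog upload failed" error instead of failing the shard. A compact sketch of that pattern, with a plain IOException standing in for the internal TranslogUploadFailedException:

import java.io.IOException;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

final class UploadBarrierSketch {
    // Wait for all uploads to finish; translate both timeout and interrupt into
    // the same upload-failure exception type so callers handle one error path.
    static void awaitUploads(CountDownLatch latch, long timeoutMillis,
                             List<Exception> uploadFailures, String snapshotDescription) throws IOException {
        try {
            if (latch.await(timeoutMillis, TimeUnit.MILLISECONDS) == false) {
                IOException ex = new IOException(
                    "Timed out waiting for transfer of snapshot " + snapshotDescription + " to complete");
                uploadFailures.forEach(ex::addSuppressed);
                throw ex;
            }
        } catch (InterruptedException ie) {
            IOException ex = new IOException("Failed to upload " + snapshotDescription, ie);
            uploadFailures.forEach(ex::addSuppressed);
            Thread.currentThread().interrupt(); // preserve the interrupt status for callers
            throw ex;
        }
    }
}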
public void testTransferSnapshotOnUploadTimeout() throws Exception { + doAnswer(invocationOnMock -> { + Thread.sleep(31 * 1000); + return null; + }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class)); + FileTransferTracker fileTransferTracker = new FileTransferTracker( + new ShardId("index", "indexUUid", 0), + remoteTranslogTransferTracker + ); + TranslogTransferManager translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath, + fileTransferTracker, + remoteTranslogTransferTracker + ); + SetOnce exception = new SetOnce<>(); + translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { + @Override + public void onUploadComplete(TransferSnapshot transferSnapshot) {} + + @Override + public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { + exception.set(ex); + } + }); + assertNotNull(exception.get()); + assertTrue(exception.get() instanceof TranslogUploadFailedException); + assertEquals("Timed out waiting for transfer of snapshot test-to-string to complete", exception.get().getMessage()); + } + + public void testTransferSnapshotOnThreadInterrupt() throws Exception { + SetOnce uploadThread = new SetOnce<>(); + doAnswer(invocationOnMock -> { + uploadThread.set(new Thread(() -> { + ActionListener listener = invocationOnMock.getArgument(2); + try { + Thread.sleep(31 * 1000); + } catch (InterruptedException ignore) { + List list = new ArrayList<>(invocationOnMock.getArgument(0)); + listener.onFailure(new FileTransferException(list.get(0), ignore)); + } + })); + uploadThread.get().start(); + return null; + }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class)); + FileTransferTracker fileTransferTracker = new FileTransferTracker( + new ShardId("index", "indexUUid", 0), + remoteTranslogTransferTracker + ); + TranslogTransferManager translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath, + fileTransferTracker, + remoteTranslogTransferTracker + ); + SetOnce exception = new SetOnce<>(); + + Thread thread = new Thread(() -> { + try { + translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { + @Override + public void onUploadComplete(TransferSnapshot transferSnapshot) {} + + @Override + public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { + exception.set(ex); + } + }); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + thread.start(); + + Thread.sleep(1000); + // Interrupt the thread + thread.interrupt(); + assertBusy(() -> { + assertNotNull(exception.get()); + assertTrue(exception.get() instanceof TranslogUploadFailedException); + assertEquals("Failed to upload test-to-string", exception.get().getMessage()); + }); + uploadThread.get().interrupt(); + } + private TransferSnapshot createTransferSnapshot() { return new TransferSnapshot() { @Override @@ -232,6 +321,11 @@ public Set getTranslogFileSnapshots() { public TranslogTransferMetadata getTranslogTransferMetadata() { return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, randomInt(5)); } + + @Override + public String toString() { + return "test-to-string"; + } }; } From d1c94b524ea54f3fce3e40e211bb7108800c07de Mon Sep 17 00:00:00 2001 From: Dhwanil Patel Date: Thu, 26 Oct 2023 11:35:37 +0530 Subject: [PATCH 094/178] Improved logging around remote cluster state 
(#10892) * Improved logging around remote cluster state Signed-off-by: Dhwanil Patel --- .../remote/RemoteClusterStateService.java | 28 +++++++++++++------ .../recovery/RemoteStoreRestoreService.java | 1 + 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 57b1b972e08c0..fa4eef79c46dc 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -244,9 +244,8 @@ public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState, Stri allUploadedIndexMetadata.size() ); } else { - // todo change to debug logger.info( - "writing cluster state took [{}ms]; " + "wrote full state with [{}] indices", + "writing cluster state took [{}ms]; " + "wrote full state with [{}] indices and global metadata", durationMillis, allUploadedIndexMetadata.size() ); @@ -285,6 +284,7 @@ public ClusterMetadataManifest writeIncrementalMetadata( if (updateGlobalMetadata || previousManifest.getGlobalMetadataFileName() == null) { globalMetadataFile = writeGlobalMetadata(clusterState); } else { + logger.debug("Global metadata has not updated in cluster state, skipping upload of it"); globalMetadataFile = previousManifest.getGlobalMetadataFileName(); } @@ -305,7 +305,7 @@ public ClusterMetadataManifest writeIncrementalMetadata( for (final IndexMetadata indexMetadata : clusterState.metadata().indices().values()) { final Long previousVersion = previousStateIndexMetadataVersionByName.get(indexMetadata.getIndex().getName()); if (previousVersion == null || indexMetadata.getVersion() != previousVersion) { - logger.trace( + logger.debug( "updating metadata for [{}], changing version from [{}] to [{}]", indexMetadata.getIndex(), previousVersion, @@ -342,18 +342,22 @@ public ClusterMetadataManifest writeIncrementalMetadata( if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { logger.warn( "writing cluster state took [{}ms] which is above the warn threshold of [{}]; " - + "wrote metadata for [{}] indices and skipped [{}] unchanged indices", + + "wrote metadata for [{}] indices and skipped [{}] unchanged indices, global metadata updated : [{}]", durationMillis, slowWriteLoggingThreshold, numIndicesUpdated, - numIndicesUnchanged + numIndicesUnchanged, + updateGlobalMetadata ); } else { - logger.trace( - "writing cluster state took [{}ms]; " + "wrote metadata for [{}] indices and skipped [{}] unchanged indices", + logger.info( + "writing cluster state for version [{}] took [{}ms]; " + + "wrote metadata for [{}] indices and skipped [{}] unchanged indices, global metadata updated : [{}]", + manifest.getStateVersion(), durationMillis, numIndicesUpdated, - numIndicesUnchanged + numIndicesUnchanged, + updateGlobalMetadata ); } return manifest; @@ -605,6 +609,11 @@ private void writeMetadataManifest(String clusterName, String clusterUUID, Clust blobStoreRepository.getCompressor(), FORMAT_PARAMS ); + logger.debug( + "Metadata manifest file [{}] written during [{}] phase. ", + fileName, + uploadManifest.isCommitted() ? 
"commit" : "publish" + ); } private String fetchPreviousClusterUUID(String clusterName, String clusterUUID) { @@ -912,6 +921,7 @@ private List createClusterChain(final Map { indexMetadataMap.put(indexMetadata.getIndex().getName(), new Tuple<>(true, indexMetadata)); From 746ca09ac3ce33b27e94c871a94f0ded076e19e3 Mon Sep 17 00:00:00 2001 From: Sooraj Sinha <81695996+soosinha@users.noreply.github.com> Date: Thu, 26 Oct 2023 11:55:13 +0530 Subject: [PATCH 095/178] Fix valid cluster UUID logic for uncommitted cluster UUIDs (#10916) Signed-off-by: Sooraj Sinha --- .../remote/RemoteClusterStateService.java | 36 +++++++------- .../RemoteClusterStateServiceTests.java | 49 +++++++++++++++---- 2 files changed, 59 insertions(+), 26 deletions(-) diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index fa4eef79c46dc..b3309b1fd8a63 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -881,25 +881,31 @@ private Map getLatestManifestForAllClusterUUIDs * @return List of cluster UUIDs. The first element is the most recent cluster UUID in the chain */ private List createClusterChain(final Map manifestsByClusterUUID, final String clusterName) { - final Map clusterUUIDGraph = manifestsByClusterUUID.values() + final List validClusterManifests = manifestsByClusterUUID.values() .stream() + .filter(this::isValidClusterUUID) + .collect(Collectors.toList()); + final Map clusterUUIDGraph = validClusterManifests.stream() .collect(Collectors.toMap(ClusterMetadataManifest::getClusterUUID, ClusterMetadataManifest::getPreviousClusterUUID)); - final List validClusterUUIDs = manifestsByClusterUUID.values() - .stream() - .filter(m -> !isInvalidClusterUUID(m) && !clusterUUIDGraph.containsValue(m.getClusterUUID())) + final List topLevelClusterUUIDs = validClusterManifests.stream() .map(ClusterMetadataManifest::getClusterUUID) + .filter(clusterUUID -> !clusterUUIDGraph.containsValue(clusterUUID)) .collect(Collectors.toList()); - if (validClusterUUIDs.isEmpty()) { - logger.info("There is no valid previous cluster UUID"); + + if (topLevelClusterUUIDs.isEmpty()) { + // This can occur only when there are no valid cluster UUIDs + assert validClusterManifests.isEmpty() : "There are no top level cluster UUIDs even when there are valid cluster UUIDs"; + logger.info("There is no valid previous cluster UUID. All cluster UUIDs evaluated are: {}", manifestsByClusterUUID.keySet()); return Collections.emptyList(); } - if (validClusterUUIDs.size() > 1) { + if (topLevelClusterUUIDs.size() > 1) { + logger.info("Top level cluster UUIDs: {}", topLevelClusterUUIDs); // If the valid cluster UUIDs are more that 1, it means there was some race condition where // more then 2 cluster manager nodes tried to become active cluster manager and published // 2 cluster UUIDs which followed the same previous UUID. 
final Map manifestsByClusterUUIDTrimmed = trimClusterUUIDs( manifestsByClusterUUID, - validClusterUUIDs, + topLevelClusterUUIDs, clusterName ); if (manifestsByClusterUUID.size() == manifestsByClusterUUIDTrimmed.size()) { @@ -908,14 +914,14 @@ private List createClusterChain(final Map validChain = new ArrayList<>(); - String currentUUID = validClusterUUIDs.get(0); + String currentUUID = topLevelClusterUUIDs.get(0); while (currentUUID != null && !ClusterState.UNKNOWN_UUID.equals(currentUUID)) { validChain.add(currentUUID); // Getting the previous cluster UUID of a cluster UUID from the clusterUUID Graph @@ -942,11 +948,7 @@ private Map trimClusterUUIDs( // Here we compare the manifest of current UUID to that of previous UUID // In case currentUUID's latest manifest is same as previous UUIDs latest manifest, // that means it was restored from previousUUID and no IndexMetadata update was performed on it. - if (ClusterState.UNKNOWN_UUID.equals(currentManifest.getPreviousClusterUUID())) { - if (currentManifest.getIndices().isEmpty()) { - trimmedUUIDs.remove(clusterUUID); - } - } else { + if (!ClusterState.UNKNOWN_UUID.equals(currentManifest.getPreviousClusterUUID())) { ClusterMetadataManifest previousManifest = trimmedUUIDs.get(currentManifest.getPreviousClusterUUID()); if (isMetadataEqual(currentManifest, previousManifest, clusterName) && isGlobalMetadataEqual(currentManifest, previousManifest, clusterName)) { @@ -985,8 +987,8 @@ private boolean isGlobalMetadataEqual(ClusterMetadataManifest first, ClusterMeta return Metadata.isGlobalResourcesMetadataEquals(firstGlobalMetadata, secondGlobalMetadata); } - private boolean isInvalidClusterUUID(ClusterMetadataManifest manifest) { - return !manifest.isClusterUUIDCommitted(); + private boolean isValidClusterUUID(ClusterMetadataManifest manifest) { + return manifest.isClusterUUIDCommitted(); } /** diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index ca88653f529f6..586618bd1ecff 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -911,7 +911,7 @@ public void testGetValidPreviousClusterUUIDWithMultipleChains() throws IOExcepti "cluster-uuid3", "cluster-uuid1" ); - mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, randomBoolean()); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, randomBoolean(), Collections.emptyMap()); remoteClusterStateService.start(); String previousClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster"); @@ -933,6 +933,23 @@ public void testGetValidPreviousClusterUUIDWithInvalidMultipleChains() throws IO assertThrows(IllegalStateException.class, () -> remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")); } + public void testGetValidPreviousClusterUUIDWhenLastUUIDUncommitted() throws IOException { + Map clusterUUIDsPointers = Map.of( + "cluster-uuid1", + ClusterState.UNKNOWN_UUID, + "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid3", + "cluster-uuid2" + ); + Map clusterUUIDCommitted = Map.of("cluster-uuid1", true, "cluster-uuid2", true, "cluster-uuid3", false); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, clusterUUIDCommitted); + + remoteClusterStateService.start(); + String previousClusterUUID = 
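An aside on createClusterChain() above: the fix builds the UUID graph from committed (valid) manifests only, so an uncommitted latest UUID no longer hides its committed predecessor from the top-level search; the new testGetValidPreviousClusterUUIDWhenLastUUIDUncommitted expects "cluster-uuid2" when "cluster-uuid3" is uncommitted. A small sketch of the backwards walk, where the map is clusterUUID -> previousClusterUUID built from committed manifests and "_na_" mirrors ClusterState.UNKNOWN_UUID:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

final class ClusterChainSketch {
    private static final String UNKNOWN_UUID = "_na_"; // mirrors ClusterState.UNKNOWN_UUID

    // Walk from a top-level cluster UUID back through its predecessors.
    static List<String> walkChain(String headUuid, Map<String, String> previousUuidByUuid) {
        List<String> chain = new ArrayList<>();
        String current = headUuid;
        while (current != null && !UNKNOWN_UUID.equals(current)) {
            chain.add(current);
            current = previousUuidByUuid.get(current); // null ends the walk defensively
        }
        return chain; // most recent UUID first, oldest last
    }

    public static void main(String[] args) {
        // Only committed manifests contribute edges, so an uncommitted "cluster-uuid3" is absent.
        Map<String, String> graph = Map.of("cluster-uuid1", UNKNOWN_UUID, "cluster-uuid2", "cluster-uuid1");
        System.out.println(walkChain("cluster-uuid2", graph)); // [cluster-uuid2, cluster-uuid1]
    }
}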
remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster"); + assertThat(previousClusterUUID, equalTo("cluster-uuid2")); + } + public void testDeleteStaleClusterUUIDs() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); ClusterMetadataManifest clusterMetadataManifest = ClusterMetadataManifest.builder() @@ -1128,11 +1145,21 @@ public void testGlobalMetadataUploadWaitTimeSetting() { } private void mockObjectsForGettingPreviousClusterUUID(Map clusterUUIDsPointers) throws IOException { - mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, false); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, false, Collections.emptyMap()); } - private void mockObjectsForGettingPreviousClusterUUID(Map clusterUUIDsPointers, boolean differGlobalMetadata) - throws IOException { + private void mockObjectsForGettingPreviousClusterUUID( + Map clusterUUIDsPointers, + Map clusterUUIDCommitted + ) throws IOException { + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, false, clusterUUIDCommitted); + } + + private void mockObjectsForGettingPreviousClusterUUID( + Map clusterUUIDsPointers, + boolean differGlobalMetadata, + Map clusterUUIDCommitted + ) throws IOException { final BlobPath blobPath = mock(BlobPath.class); when((blobStoreRepository.basePath())).thenReturn(blobPath); when(blobPath.add(anyString())).thenReturn(blobPath); @@ -1155,7 +1182,8 @@ private void mockObjectsForGettingPreviousClusterUUID(Map cluste clusterUUIDsPointers.get("cluster-uuid1"), randomAlphaOfLength(10), uploadedIndexMetadataList1, - "test-metadata1" + "test-metadata1", + clusterUUIDCommitted.getOrDefault("cluster-uuid1", true) ); Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexMetadata indexMetadata1 = IndexMetadata.builder("index1") @@ -1184,7 +1212,8 @@ private void mockObjectsForGettingPreviousClusterUUID(Map cluste clusterUUIDsPointers.get("cluster-uuid2"), randomAlphaOfLength(10), uploadedIndexMetadataList2, - "test-metadata2" + "test-metadata2", + clusterUUIDCommitted.getOrDefault("cluster-uuid2", true) ); IndexMetadata indexMetadata3 = IndexMetadata.builder("index1") .settings(indexSettings) @@ -1229,7 +1258,8 @@ private void mockObjectsForGettingPreviousClusterUUID(Map cluste clusterUUIDsPointers.get("cluster-uuid3"), randomAlphaOfLength(10), uploadedIndexMetadataList3, - "test-metadata3" + "test-metadata3", + clusterUUIDCommitted.getOrDefault("cluster-uuid3", true) ); mockBlobContainerForGlobalMetadata(blobContainer3, clusterManifest3, metadata3); mockBlobContainer(blobContainer3, clusterManifest3, indexMetadataMap3, ClusterMetadataManifest.CODEC_V1); @@ -1257,7 +1287,8 @@ private ClusterMetadataManifest generateClusterMetadataManifest( String previousClusterUUID, String stateUUID, List uploadedIndexMetadata, - String globalMetadataFileName + String globalMetadataFileName, + Boolean isUUIDCommitted ) { return ClusterMetadataManifest.builder() .indices(uploadedIndexMetadata) @@ -1269,7 +1300,7 @@ private ClusterMetadataManifest generateClusterMetadataManifest( .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) .previousClusterUUID(previousClusterUUID) .committed(true) - .clusterUUIDCommitted(true) + .clusterUUIDCommitted(isUUIDCommitted) .globalMetadataFileName(globalMetadataFileName) .codecVersion(ClusterMetadataManifest.CODEC_V1) .build(); From 5ae93338e786173e39f5906ea1b2a5129f98ab66 Mon Sep 17 00:00:00 
2001 From: Suraj Singh Date: Thu, 26 Oct 2023 07:42:23 -0700 Subject: [PATCH 096/178] Mute testSegRepSucceedsOnPreviousCopiedFiles and testNoFailuresOnFileReads unit tests (#10942) Signed-off-by: Suraj Singh --- .../java/org/opensearch/index/shard/RemoteIndexShardTests.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java index 2ce0bdc607189..20cec90d79e3e 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -362,6 +362,7 @@ public void testPrimaryRestart() throws Exception { * prevent FileAlreadyExistsException. It does so by only copying files in first round of segment replication without * committing locally so that in next round of segment replication those files are not considered for download again */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10885") public void testSegRepSucceedsOnPreviousCopiedFiles() throws Exception { try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { shards.startAll(); @@ -453,6 +454,7 @@ public void onReplicationFailure( * blocking update of reader. Once this is done, it corrupts one segment file and ensure that file is deleted in next * round of segment replication by ensuring doc count. */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10885") public void testNoFailuresOnFileReads() throws Exception { try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { shards.startAll(); From 0c9fc21ae78babd6820479ea940a4c986d82f10f Mon Sep 17 00:00:00 2001 From: Poojita Raj Date: Thu, 26 Oct 2023 16:11:19 -0700 Subject: [PATCH 097/178] Add log for failover time (#10952) Signed-off-by: Poojita Raj --- .../java/org/opensearch/index/shard/IndexShard.java | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 5b6257084e440..352d4efc95269 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -187,6 +187,7 @@ import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; +import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.search.suggest.completion.CompletionStats; @@ -698,7 +699,16 @@ public void updateShardState( if (indexSettings.isSegRepEnabled()) { // this Shard's engine was read only, we need to update its engine before restoring local history from xlog. 
assert newRouting.primary() && currentRouting.primary() == false; + ReplicationTimer timer = new ReplicationTimer(); + timer.start(); + logger.debug( + "Resetting engine on promotion of shard [{}] to primary, startTime {}\n", + shardId, + timer.startTime() + ); resetEngineToGlobalCheckpoint(); + timer.stop(); + logger.info("Completed engine failover for shard [{}] in: {} ms", shardId, timer.time()); // It is possible an engine can open with a SegmentInfos on a higher gen but the reader does not refresh to // trigger our refresh listener. // Force update the checkpoint post engine reset. From e9affeab3494f4c2ed96a3efd647530b38a315fc Mon Sep 17 00:00:00 2001 From: rishavz_sagar Date: Fri, 27 Oct 2023 14:34:13 +0530 Subject: [PATCH 098/178] Fixing unreferenced file cleanup flaky tests (#10801) Signed-off-by: RS146BIJAY --- .../index/engine/InternalEngineTests.java | 44 ++----------------- 1 file changed, 4 insertions(+), 40 deletions(-) diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index 305c3a3acbf75..81d8bccb86c60 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -40,7 +40,6 @@ import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.appender.AbstractAppender; import org.apache.logging.log4j.core.filter.RegexFilter; -import org.apache.lucene.codecs.LiveDocsFormat; import org.apache.lucene.document.Field; import org.apache.lucene.document.KeywordField; import org.apache.lucene.document.LongPoint; @@ -3237,22 +3236,10 @@ public void testUnreferencedFileCleanUpOnSegmentMergeFailureWithCleanUpEnabled() MockDirectoryWrapper wrapper = newMockDirectory(); final CountDownLatch cleanupCompleted = new CountDownLatch(1); MockDirectoryWrapper.Failure fail = new MockDirectoryWrapper.Failure() { - public boolean didFail1; - public boolean didFail2; - @Override public void eval(MockDirectoryWrapper dir) throws IOException { - if (!doFail) { - return; - } - - // Fail segment merge with diskfull during merging terms. - if (callStackContainsAnyOf("mergeTerms") && !didFail1) { - didFail1 = true; - throw new IOException("No space left on device"); - } - if (callStackContains(LiveDocsFormat.class, "writeLiveDocs") && !didFail2) { - didFail2 = true; + // Fail segment merge with diskfull during merging terms + if (callStackContainsAnyOf("mergeTerms")) { throw new IOException("No space left on device"); } } @@ -3325,7 +3312,6 @@ public void onFailedEngine(String reason, Exception e) { segments = engine.segments(false); assertThat(segments.size(), equalTo(2)); - fail.setDoFail(); // IndexWriter can throw either IOException or IllegalStateException depending on whether tragedy is set or not. 
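On the IndexShard hunk above (failover logging): the change brackets resetEngineToGlobalCheckpoint() with a ReplicationTimer so the cost of promotion is visible in the logs. A minimal sketch of the same start/stop/elapsed pattern, using the monotonic System.nanoTime() rather than wall-clock time, with a hypothetical helper standing in for ReplicationTimer:

final class FailoverTimerSketch {
    private long startNanos;
    private long elapsedMillis = -1;

    void start() { startNanos = System.nanoTime(); } // monotonic start mark
    void stop()  { elapsedMillis = (System.nanoTime() - startNanos) / 1_000_000; }
    long timeMillis() { return elapsedMillis; }

    public static void main(String[] args) throws InterruptedException {
        FailoverTimerSketch timer = new FailoverTimerSketch();
        timer.start();
        Thread.sleep(25); // stands in for the engine reset work
        timer.stop();
        System.out.println("Completed engine failover in: " + timer.timeMillis() + " ms");
    }
}

A monotonic clock is the right choice here: wall-clock time can jump under NTP adjustment, while nanoTime() only ever moves forward, so the logged duration is always non-negative.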
expectThrowsAnyOf( Arrays.asList(IOException.class, IllegalStateException.class), @@ -3345,20 +3331,10 @@ public void testUnreferencedFileCleanUpOnSegmentMergeFailureWithCleanUpDisabled( MockDirectoryWrapper wrapper = newMockDirectory(); final CountDownLatch cleanupCompleted = new CountDownLatch(1); MockDirectoryWrapper.Failure fail = new MockDirectoryWrapper.Failure() { - public boolean didFail1; - public boolean didFail2; @Override public void eval(MockDirectoryWrapper dir) throws IOException { - if (!doFail) { - return; - } - if (callStackContainsAnyOf("mergeTerms") && !didFail1) { - didFail1 = true; - throw new IOException("No space left on device"); - } - if (callStackContains(LiveDocsFormat.class, "writeLiveDocs") && !didFail2) { - didFail2 = true; + if (callStackContainsAnyOf("mergeTerms")) { throw new IOException("No space left on device"); } } @@ -3439,7 +3415,6 @@ public void onFailedEngine(String reason, Exception e) { segments = engine.segments(false); assertThat(segments.size(), equalTo(2)); - fail.setDoFail(); // IndexWriter can throw either IOException or IllegalStateException depending on whether tragedy is set or not. expectThrowsAnyOf( Arrays.asList(IOException.class, IllegalStateException.class), @@ -3459,20 +3434,10 @@ public void testUnreferencedFileCleanUpFailsOnSegmentMergeFailureWhenDirectoryCl MockDirectoryWrapper wrapper = newMockDirectory(); final CountDownLatch cleanupCompleted = new CountDownLatch(1); MockDirectoryWrapper.Failure fail = new MockDirectoryWrapper.Failure() { - public boolean didFail1; - public boolean didFail2; @Override public void eval(MockDirectoryWrapper dir) throws IOException { - if (!doFail) { - return; - } - if (callStackContainsAnyOf("mergeTerms") && !didFail1) { - didFail1 = true; - throw new IOException("No space left on device"); - } - if (callStackContains(LiveDocsFormat.class, "writeLiveDocs") && !didFail2) { - didFail2 = true; + if (callStackContainsAnyOf("mergeTerms")) { throw new IOException("No space left on device"); } } @@ -3537,7 +3502,6 @@ public void onFailedEngine(String reason, Exception e) { segments = engine.segments(false); assertThat(segments.size(), equalTo(2)); - fail.setDoFail(); // Close the store so that unreferenced file cleanup will fail. 
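For context on the InternalEngineTests cleanup above: the flakiness came from the one-shot didFail1/didFail2 flags combined with the fail.setDoFail() ordering, so the simulated disk-full did not always fire during the merge. The fix throws unconditionally whenever mergeTerms is on the stack. A generic sketch of that call-stack fault-injection idiom, with a hand-rolled stack check standing in for MockDirectoryWrapper's callStackContainsAnyOf helper:

import java.io.IOException;
import java.util.Arrays;

final class MergeFaultSketch {
    // Throw a simulated disk-full error whenever the named method is executing.
    static void maybeFail(String methodToFail) throws IOException {
        boolean onStack = Arrays.stream(Thread.currentThread().getStackTrace())
            .anyMatch(frame -> frame.getMethodName().contains(methodToFail));
        if (onStack) {
            throw new IOException("No space left on device");
        }
    }
}

Because the failure is stateless, every merge attempt fails the same way, which is what makes the test deterministic.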
store.close(); From be65f543d125965d699e493933851d0cfec7e530 Mon Sep 17 00:00:00 2001 From: Ketan Verma <9292653+ketanv3@users.noreply.github.com> Date: Fri, 27 Oct 2023 18:02:29 +0530 Subject: [PATCH 099/178] Remove deprecated classes for Rounding (#10956) Signed-off-by: Ketan Verma --- CHANGELOG.md | 1 + .../benchmark/time/RoundingBenchmark.java | 180 ---- .../common/rounding/DateTimeUnit.java | 99 --- .../opensearch/common/rounding/Rounding.java | 459 ---------- .../common/rounding/package-info.java | 10 - .../org/opensearch/common/RoundingTests.java | 3 +- .../common/rounding/DateTimeUnitTests.java | 75 -- .../common/rounding/RoundingDuelTests.java | 70 -- .../rounding/TimeZoneRoundingTests.java | 822 ------------------ 9 files changed, 2 insertions(+), 1717 deletions(-) delete mode 100644 benchmarks/src/main/java/org/opensearch/benchmark/time/RoundingBenchmark.java delete mode 100644 server/src/main/java/org/opensearch/common/rounding/DateTimeUnit.java delete mode 100644 server/src/main/java/org/opensearch/common/rounding/Rounding.java delete mode 100644 server/src/main/java/org/opensearch/common/rounding/package-info.java delete mode 100644 server/src/test/java/org/opensearch/common/rounding/DateTimeUnitTests.java delete mode 100644 server/src/test/java/org/opensearch/common/rounding/RoundingDuelTests.java delete mode 100644 server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index b40878066960a..234b08398f9ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -124,6 +124,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Deprecated ### Removed +- Remove deprecated classes for Rounding ([#10956](https://github.com/opensearch-project/OpenSearch/issues/10956)) ### Fixed - Fix failure in dissect ingest processor parsing empty brackets ([#9225](https://github.com/opensearch-project/OpenSearch/pull/9255)) diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/time/RoundingBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/time/RoundingBenchmark.java deleted file mode 100644 index cdbcbfc163191..0000000000000 --- a/benchmarks/src/main/java/org/opensearch/benchmark/time/RoundingBenchmark.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.benchmark.time; - -import org.opensearch.common.Rounding; -import org.opensearch.common.rounding.DateTimeUnit; -import org.opensearch.common.time.DateUtils; -import org.opensearch.common.unit.TimeValue; -import org.joda.time.DateTimeZone; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OutputTimeUnit; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.Warmup; - -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.util.concurrent.TimeUnit; - -import static org.opensearch.common.Rounding.DateTimeUnit.DAY_OF_MONTH; -import static org.opensearch.common.Rounding.DateTimeUnit.MONTH_OF_YEAR; -import static org.opensearch.common.Rounding.DateTimeUnit.QUARTER_OF_YEAR; -import static org.opensearch.common.Rounding.DateTimeUnit.YEAR_OF_CENTURY; - -@Fork(3) -@Warmup(iterations = 10) -@Measurement(iterations = 10) -@BenchmarkMode(Mode.AverageTime) -@OutputTimeUnit(TimeUnit.NANOSECONDS) -@State(Scope.Benchmark) -@SuppressWarnings("unused") // invoked by benchmarking framework -public class RoundingBenchmark { - - private final ZoneId zoneId = ZoneId.of("Europe/Amsterdam"); - private final DateTimeZone timeZone = DateUtils.zoneIdToDateTimeZone(zoneId); - - private long timestamp = 1548879021354L; - - private final org.opensearch.common.rounding.Rounding jodaRounding = org.opensearch.common.rounding.Rounding.builder( - DateTimeUnit.HOUR_OF_DAY - ).timeZone(timeZone).build(); - private final Rounding javaRounding = Rounding.builder(Rounding.DateTimeUnit.HOUR_OF_DAY).timeZone(zoneId).build(); - - @Benchmark - public long timeRoundingDateTimeUnitJoda() { - return jodaRounding.round(timestamp); - } - - @Benchmark - public long timeRoundingDateTimeUnitJava() { - return javaRounding.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding jodaDayOfMonthRounding = org.opensearch.common.rounding.Rounding.builder( - DateTimeUnit.DAY_OF_MONTH - ).timeZone(timeZone).build(); - private final Rounding javaDayOfMonthRounding = Rounding.builder(DAY_OF_MONTH).timeZone(zoneId).build(); - - @Benchmark - public long timeRoundingDateTimeUnitDayOfMonthJoda() { - return jodaDayOfMonthRounding.round(timestamp); - } - - @Benchmark - public long timeRoundingDateTimeUnitDayOfMonthJava() { - return javaDayOfMonthRounding.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeIntervalRoundingJoda = org.opensearch.common.rounding.Rounding.builder( - TimeValue.timeValueMinutes(60) - ).timeZone(timeZone).build(); - private final Rounding timeIntervalRoundingJava = Rounding.builder(TimeValue.timeValueMinutes(60)).timeZone(zoneId).build(); - - @Benchmark - public long timeIntervalRoundingJava() { - return timeIntervalRoundingJava.round(timestamp); - } - - @Benchmark - public long timeIntervalRoundingJoda() { - return timeIntervalRoundingJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcDayOfMonthJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.DAY_OF_MONTH) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcDayOfMonthJava = Rounding.builder(DAY_OF_MONTH).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long 
timeUnitRoundingUtcDayOfMonthJava() { - return timeUnitRoundingUtcDayOfMonthJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcDayOfMonthJoda() { - return timeUnitRoundingUtcDayOfMonthJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcQuarterOfYearJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.QUARTER) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcQuarterOfYearJava = Rounding.builder(QUARTER_OF_YEAR).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long timeUnitRoundingUtcQuarterOfYearJava() { - return timeUnitRoundingUtcQuarterOfYearJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcQuarterOfYearJoda() { - return timeUnitRoundingUtcQuarterOfYearJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcMonthOfYearJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.MONTH_OF_YEAR) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcMonthOfYearJava = Rounding.builder(MONTH_OF_YEAR).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long timeUnitRoundingUtcMonthOfYearJava() { - return timeUnitRoundingUtcMonthOfYearJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcMonthOfYearJoda() { - return timeUnitRoundingUtcMonthOfYearJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcYearOfCenturyJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.YEAR_OF_CENTURY) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcYearOfCenturyJava = Rounding.builder(YEAR_OF_CENTURY).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long timeUnitRoundingUtcYearOfCenturyJava() { - return timeUnitRoundingUtcYearOfCenturyJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcYearOfCenturyJoda() { - return timeUnitRoundingUtcYearOfCenturyJoda.round(timestamp); - } -} diff --git a/server/src/main/java/org/opensearch/common/rounding/DateTimeUnit.java b/server/src/main/java/org/opensearch/common/rounding/DateTimeUnit.java deleted file mode 100644 index 47e182b3caf84..0000000000000 --- a/server/src/main/java/org/opensearch/common/rounding/DateTimeUnit.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.common.rounding; - -import org.opensearch.OpenSearchException; -import org.opensearch.common.joda.Joda; -import org.joda.time.DateTimeField; -import org.joda.time.DateTimeZone; -import org.joda.time.chrono.ISOChronology; - -import java.util.function.Function; - -/** - * Main date time unit class. - * - * @opensearch.internal - */ -public enum DateTimeUnit { - - WEEK_OF_WEEKYEAR((byte) 1, tz -> ISOChronology.getInstance(tz).weekOfWeekyear()), - YEAR_OF_CENTURY((byte) 2, tz -> ISOChronology.getInstance(tz).yearOfCentury()), - QUARTER((byte) 3, tz -> Joda.QuarterOfYear.getField(ISOChronology.getInstance(tz))), - MONTH_OF_YEAR((byte) 4, tz -> ISOChronology.getInstance(tz).monthOfYear()), - DAY_OF_MONTH((byte) 5, tz -> ISOChronology.getInstance(tz).dayOfMonth()), - HOUR_OF_DAY((byte) 6, tz -> ISOChronology.getInstance(tz).hourOfDay()), - MINUTES_OF_HOUR((byte) 7, tz -> ISOChronology.getInstance(tz).minuteOfHour()), - SECOND_OF_MINUTE((byte) 8, tz -> ISOChronology.getInstance(tz).secondOfMinute()); - - private final byte id; - private final Function fieldFunction; - - DateTimeUnit(byte id, Function fieldFunction) { - this.id = id; - this.fieldFunction = fieldFunction; - } - - public byte id() { - return id; - } - - /** - * @return the {@link DateTimeField} for the provided {@link DateTimeZone} for this time unit - */ - public DateTimeField field(DateTimeZone tz) { - return fieldFunction.apply(tz); - } - - public static DateTimeUnit resolve(byte id) { - switch (id) { - case 1: - return WEEK_OF_WEEKYEAR; - case 2: - return YEAR_OF_CENTURY; - case 3: - return QUARTER; - case 4: - return MONTH_OF_YEAR; - case 5: - return DAY_OF_MONTH; - case 6: - return HOUR_OF_DAY; - case 7: - return MINUTES_OF_HOUR; - case 8: - return SECOND_OF_MINUTE; - default: - throw new OpenSearchException("Unknown date time unit id [" + id + "]"); - } - } -} diff --git a/server/src/main/java/org/opensearch/common/rounding/Rounding.java b/server/src/main/java/org/opensearch/common/rounding/Rounding.java deleted file mode 100644 index 41e808b64f7d9..0000000000000 --- a/server/src/main/java/org/opensearch/common/rounding/Rounding.java +++ /dev/null @@ -1,459 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.common.rounding; - -import org.opensearch.OpenSearchException; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; -import org.joda.time.DateTimeField; -import org.joda.time.DateTimeZone; -import org.joda.time.IllegalInstantException; - -import java.io.IOException; -import java.util.Objects; - -/** - * A strategy for rounding long values. - *

- * Use the java based Rounding class where applicable - * - * @opensearch.internal - */ -@Deprecated -public abstract class Rounding implements Writeable { - - public abstract byte id(); - - /** - * Rounds the given value. - */ - public abstract long round(long value); - - /** - * Given the rounded value (which was potentially generated by {@link #round(long)}, returns the next rounding value. For example, with - * interval based rounding, if the interval is 3, {@code nextRoundValue(6) = 9 }. - * - * @param value The current rounding value - * @return The next rounding value; - */ - public abstract long nextRoundingValue(long value); - - @Override - public abstract boolean equals(Object obj); - - @Override - public abstract int hashCode(); - - public static Builder builder(DateTimeUnit unit) { - return new Builder(unit); - } - - public static Builder builder(TimeValue interval) { - return new Builder(interval); - } - - /** - * Builder for rounding - * - * @opensearch.internal - */ - public static class Builder { - - private final DateTimeUnit unit; - private final long interval; - - private DateTimeZone timeZone = DateTimeZone.UTC; - - public Builder(DateTimeUnit unit) { - this.unit = unit; - this.interval = -1; - } - - public Builder(TimeValue interval) { - this.unit = null; - if (interval.millis() < 1) throw new IllegalArgumentException("Zero or negative time interval not supported"); - this.interval = interval.millis(); - } - - public Builder timeZone(DateTimeZone timeZone) { - if (timeZone == null) { - throw new IllegalArgumentException("Setting null as timezone is not supported"); - } - this.timeZone = timeZone; - return this; - } - - public Rounding build() { - Rounding timeZoneRounding; - if (unit != null) { - timeZoneRounding = new TimeUnitRounding(unit, timeZone); - } else { - timeZoneRounding = new TimeIntervalRounding(interval, timeZone); - } - return timeZoneRounding; - } - } - - /** - * Rounding time units - * - * @opensearch.internal - */ - static class TimeUnitRounding extends Rounding { - - static final byte ID = 1; - - private final DateTimeUnit unit; - private final DateTimeField field; - private final DateTimeZone timeZone; - private final boolean unitRoundsToMidnight; - - TimeUnitRounding(DateTimeUnit unit, DateTimeZone timeZone) { - this.unit = unit; - this.field = unit.field(timeZone); - unitRoundsToMidnight = this.field.getDurationField().getUnitMillis() > 60L * 60L * 1000L; - this.timeZone = timeZone; - } - - TimeUnitRounding(StreamInput in) throws IOException { - unit = DateTimeUnit.resolve(in.readByte()); - timeZone = DateTimeZone.forID(in.readString()); - field = unit.field(timeZone); - unitRoundsToMidnight = field.getDurationField().getUnitMillis() > 60L * 60L * 1000L; - } - - @Override - public byte id() { - return ID; - } - - /** - * @return The latest timestamp T which is strictly before utcMillis - * and such that timeZone.getOffset(T) != timeZone.getOffset(utcMillis). - * If there is no such T, returns Long.MAX_VALUE. - */ - private long previousTransition(long utcMillis) { - final int offsetAtInputTime = timeZone.getOffset(utcMillis); - do { - // Some timezones have transitions that do not change the offset, so we have to - // repeatedly call previousTransition until a nontrivial transition is found. 
- - long previousTransition = timeZone.previousTransition(utcMillis); - if (previousTransition == utcMillis) { - // There are no earlier transitions - return Long.MAX_VALUE; - } - assert previousTransition < utcMillis; // Progress was made - utcMillis = previousTransition; - } while (timeZone.getOffset(utcMillis) == offsetAtInputTime); - - return utcMillis; - } - - @Override - public long round(long utcMillis) { - - // field.roundFloor() works as long as the offset doesn't change. It is worth getting this case out of the way first, as - // the calculations for fixing things near to offset changes are a little expensive and are unnecessary in the common case - // of working in UTC. - if (timeZone.isFixed()) { - return field.roundFloor(utcMillis); - } - - // When rounding to hours we consider any local time of the form 'xx:00:00' as rounded, even though this gives duplicate - // bucket names for the times when the clocks go back. Shorter units behave similarly. However, longer units round down to - // midnight, and on the days where there are two midnights we would rather pick the earlier one, so that buckets are - // uniquely identified by the date. - if (unitRoundsToMidnight) { - final long anyLocalStartOfDay = field.roundFloor(utcMillis); - // `anyLocalStartOfDay` is _supposed_ to be the Unix timestamp for the start of the day in question in the current time - // zone. Mostly this just means "midnight", which is fine, and on days with no local midnight it's the first time that - // does occur on that day which is also ok. However, on days with >1 local midnight this is _one_ of the midnights, but - // may not be the first. Check whether this is happening, and fix it if so. - - final long previousTransition = previousTransition(anyLocalStartOfDay); - - if (previousTransition == Long.MAX_VALUE) { - // No previous transitions, so there can't be another earlier local midnight. - return anyLocalStartOfDay; - } - - final long currentOffset = timeZone.getOffset(anyLocalStartOfDay); - final long previousOffset = timeZone.getOffset(previousTransition); - assert currentOffset != previousOffset; - - // NB we only assume interference from one previous transition. It's theoretically possible to have two transitions in - // quick succession, both of which have a midnight in them, but this doesn't appear to happen in the TZDB so (a) it's - // pointless to implement and (b) it won't be tested. I recognise that this comment is tempting fate and will likely - // cause this very situation to occur in the near future, and eagerly look forward to fixing this using a loop over - // previous transitions when it happens. - - final long alsoLocalStartOfDay = anyLocalStartOfDay + currentOffset - previousOffset; - // `alsoLocalStartOfDay` is the Unix timestamp for the start of the day in question if the previous offset were in - // effect. - - if (alsoLocalStartOfDay <= previousTransition) { - // Therefore the previous offset _is_ in effect at `alsoLocalStartOfDay`, and it's earlier than anyLocalStartOfDay, - // so this is the answer to use. - return alsoLocalStartOfDay; - } else { - // The previous offset is not in effect at `alsoLocalStartOfDay`, so the current offset must be. - return anyLocalStartOfDay; - } - - } else { - do { - long rounded = field.roundFloor(utcMillis); - - // field.roundFloor() mostly works as long as the offset hasn't changed in [rounded, utcMillis], so look at where - // the offset most recently changed. 
- - final long previousTransition = previousTransition(utcMillis); - - if (previousTransition == Long.MAX_VALUE || previousTransition < rounded) { - // The offset did not change in [rounded, utcMillis], so roundFloor() worked as expected. - return rounded; - } - - // The offset _did_ change in [rounded, utcMillis]. Put differently, this means that none of the times in - // [previousTransition+1, utcMillis] were rounded, so the rounded time must be <= previousTransition. This means - // it's sufficient to try and round previousTransition down. - assert previousTransition < utcMillis; - utcMillis = previousTransition; - } while (true); - } - } - - @Override - public long nextRoundingValue(long utcMillis) { - long floor = round(utcMillis); - // add one unit and round to get to next rounded value - long next = round(field.add(floor, 1)); - if (next == floor) { - // in rare case we need to add more than one unit - next = round(field.add(floor, 2)); - } - return next; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeByte(unit.id()); - out.writeString(timeZone.getID()); - } - - @Override - public int hashCode() { - return Objects.hash(unit, timeZone); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - TimeUnitRounding other = (TimeUnitRounding) obj; - return Objects.equals(unit, other.unit) && Objects.equals(timeZone, other.timeZone); - } - - @Override - public String toString() { - return "[" + timeZone + "][" + unit + "]"; - } - } - - /** - * Rounding time intervals - * - * @opensearch.internal - */ - static class TimeIntervalRounding extends Rounding { - - static final byte ID = 2; - - private final long interval; - private final DateTimeZone timeZone; - - TimeIntervalRounding(long interval, DateTimeZone timeZone) { - if (interval < 1) throw new IllegalArgumentException("Zero or negative time interval not supported"); - this.interval = interval; - this.timeZone = timeZone; - } - - TimeIntervalRounding(StreamInput in) throws IOException { - interval = in.readVLong(); - timeZone = DateTimeZone.forID(in.readString()); - } - - @Override - public byte id() { - return ID; - } - - @Override - public long round(long utcMillis) { - long timeLocal = timeZone.convertUTCToLocal(utcMillis); - long rounded = roundKey(timeLocal, interval) * interval; - long roundedUTC; - if (isInDSTGap(rounded) == false) { - roundedUTC = timeZone.convertLocalToUTC(rounded, true, utcMillis); - // check if we crossed DST transition, in this case we want the - // last rounded value before the transition - long transition = timeZone.previousTransition(utcMillis); - if (transition != utcMillis && transition > roundedUTC) { - roundedUTC = round(transition - 1); - } - } else { - /* - * Edge case where the rounded local time is illegal and landed - * in a DST gap. In this case, we choose 1ms tick after the - * transition date. We don't want the transition date itself - * because those dates, when rounded themselves, fall into the - * previous interval. This would violate the invariant that the - * rounding operation should be idempotent. 
- */ - roundedUTC = timeZone.previousTransition(utcMillis) + 1; - } - return roundedUTC; - } - - private static long roundKey(long value, long interval) { - if (value < 0) { - return (value - interval + 1) / interval; - } else { - return value / interval; - } - } - - /** - * Determine whether the local instant is a valid instant in the given - * time zone. The logic for this is taken from - * {@link DateTimeZone#convertLocalToUTC(long, boolean)} for the - * `strict` mode case, but instead of throwing an - * {@link IllegalInstantException}, which is costly, we want to return a - * flag indicating that the value is illegal in that time zone. - */ - private boolean isInDSTGap(long instantLocal) { - if (timeZone.isFixed()) { - return false; - } - // get the offset at instantLocal (first estimate) - int offsetLocal = timeZone.getOffset(instantLocal); - // adjust instantLocal using the estimate and recalc the offset - int offset = timeZone.getOffset(instantLocal - offsetLocal); - // if the offsets differ, we must be near a DST boundary - if (offsetLocal != offset) { - // determine if we are in the DST gap - long nextLocal = timeZone.nextTransition(instantLocal - offsetLocal); - if (nextLocal == (instantLocal - offsetLocal)) { - nextLocal = Long.MAX_VALUE; - } - long nextAdjusted = timeZone.nextTransition(instantLocal - offset); - if (nextAdjusted == (instantLocal - offset)) { - nextAdjusted = Long.MAX_VALUE; - } - if (nextLocal != nextAdjusted) { - // we are in the DST gap - return true; - } - } - return false; - } - - @Override - public long nextRoundingValue(long time) { - long timeLocal = time; - timeLocal = timeZone.convertUTCToLocal(time); - long next = timeLocal + interval; - return timeZone.convertLocalToUTC(next, false); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(interval); - out.writeString(timeZone.getID()); - } - - @Override - public int hashCode() { - return Objects.hash(interval, timeZone); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - TimeIntervalRounding other = (TimeIntervalRounding) obj; - return Objects.equals(interval, other.interval) && Objects.equals(timeZone, other.timeZone); - } - } - - /** - * Rounding streams - * - * @opensearch.internal - */ - public static class Streams { - - public static void write(Rounding rounding, StreamOutput out) throws IOException { - out.writeByte(rounding.id()); - rounding.writeTo(out); - } - - public static Rounding read(StreamInput in) throws IOException { - Rounding rounding; - byte id = in.readByte(); - switch (id) { - case TimeUnitRounding.ID: - rounding = new TimeUnitRounding(in); - break; - case TimeIntervalRounding.ID: - rounding = new TimeIntervalRounding(in); - break; - default: - throw new OpenSearchException("unknown rounding id [" + id + "]"); - } - return rounding; - } - - } - -} diff --git a/server/src/main/java/org/opensearch/common/rounding/package-info.java b/server/src/main/java/org/opensearch/common/rounding/package-info.java deleted file mode 100644 index 5fa3e39c6a786..0000000000000 --- a/server/src/main/java/org/opensearch/common/rounding/package-info.java +++ /dev/null @@ -1,10 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** Base DateTime rounding package. 
*/ -package org.opensearch.common.rounding; diff --git a/server/src/test/java/org/opensearch/common/RoundingTests.java b/server/src/test/java/org/opensearch/common/RoundingTests.java index 1a499bac3e2e8..cc71ee08abcca 100644 --- a/server/src/test/java/org/opensearch/common/RoundingTests.java +++ b/server/src/test/java/org/opensearch/common/RoundingTests.java @@ -33,7 +33,6 @@ package org.opensearch.common; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.rounding.DateTimeUnit; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateFormatters; import org.opensearch.common.unit.TimeValue; @@ -236,7 +235,7 @@ public void testOffsetRounding() { /** * Randomized test on TimeUnitRounding. Test uses random - * {@link DateTimeUnit} and {@link ZoneId} and often (50% of the time) + * {@link org.opensearch.common.Rounding.DateTimeUnit} and {@link ZoneId} and often (50% of the time) * chooses test dates that are exactly on or close to offset changes (e.g. * DST) in the chosen time zone. *
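This patch deletes the legacy Joda-time based rounding in org.opensearch.common.rounding together with its tests; date rounding is handled by the java.time based org.opensearch.common.Rounding, the class whose DateTimeUnit the updated RoundingTests javadoc above now links. A minimal sketch of that replacement API, modelled on the usage in the RoundingDuelTests deleted below (the class name and sample timestamp here are illustrative, not part of the codebase):

    import java.time.ZoneOffset;

    import org.opensearch.common.Rounding;

    public class RoundingSketch {
        public static void main(String[] args) {
            // Day-of-month rounding in UTC; prepareForUnknown() yields a
            // Rounding.Prepared that can round arbitrary input timestamps.
            Rounding.Prepared prepared = Rounding.builder(Rounding.DateTimeUnit.DAY_OF_MONTH)
                .timeZone(ZoneOffset.UTC)
                .build()
                .prepareForUnknown();
            long utcMillis = 1_400_000_000_000L; // 2014-05-13T16:53:20Z
            long floor = prepared.round(utcMillis);        // start of that UTC day
            long next = prepared.nextRoundingValue(floor); // start of the next day
            System.out.println(floor + " .. " + next);
        }
    }

Before the Joda variant was removed, the duel test deleted below asserted that this rounding and the Joda implementation agree on random inputs.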
diff --git a/server/src/test/java/org/opensearch/common/rounding/DateTimeUnitTests.java b/server/src/test/java/org/opensearch/common/rounding/DateTimeUnitTests.java deleted file mode 100644 index 7b87e136c5f38..0000000000000 --- a/server/src/test/java/org/opensearch/common/rounding/DateTimeUnitTests.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.rounding; - -import org.opensearch.test.OpenSearchTestCase; - -import static org.opensearch.common.rounding.DateTimeUnit.DAY_OF_MONTH; -import static org.opensearch.common.rounding.DateTimeUnit.HOUR_OF_DAY; -import static org.opensearch.common.rounding.DateTimeUnit.MINUTES_OF_HOUR; -import static org.opensearch.common.rounding.DateTimeUnit.MONTH_OF_YEAR; -import static org.opensearch.common.rounding.DateTimeUnit.QUARTER; -import static org.opensearch.common.rounding.DateTimeUnit.SECOND_OF_MINUTE; -import static org.opensearch.common.rounding.DateTimeUnit.WEEK_OF_WEEKYEAR; -import static org.opensearch.common.rounding.DateTimeUnit.YEAR_OF_CENTURY; - -public class DateTimeUnitTests extends OpenSearchTestCase { - - /** - * test that we don't accidentally change enum ids - */ - public void testEnumIds() { - assertEquals(1, WEEK_OF_WEEKYEAR.id()); - assertEquals(WEEK_OF_WEEKYEAR, DateTimeUnit.resolve((byte) 1)); - - assertEquals(2, YEAR_OF_CENTURY.id()); - assertEquals(YEAR_OF_CENTURY, DateTimeUnit.resolve((byte) 2)); - - assertEquals(3, QUARTER.id()); - assertEquals(QUARTER, DateTimeUnit.resolve((byte) 3)); - - assertEquals(4, MONTH_OF_YEAR.id()); - assertEquals(MONTH_OF_YEAR, DateTimeUnit.resolve((byte) 4)); - - assertEquals(5, DAY_OF_MONTH.id()); - assertEquals(DAY_OF_MONTH, DateTimeUnit.resolve((byte) 5)); - - assertEquals(6, HOUR_OF_DAY.id()); - assertEquals(HOUR_OF_DAY, DateTimeUnit.resolve((byte) 6)); - - assertEquals(7, MINUTES_OF_HOUR.id()); - assertEquals(MINUTES_OF_HOUR, DateTimeUnit.resolve((byte) 7)); - - assertEquals(8, SECOND_OF_MINUTE.id()); - assertEquals(SECOND_OF_MINUTE, DateTimeUnit.resolve((byte) 8)); - } -} diff --git a/server/src/test/java/org/opensearch/common/rounding/RoundingDuelTests.java b/server/src/test/java/org/opensearch/common/rounding/RoundingDuelTests.java deleted file mode 100644 index 3088067cd1f84..0000000000000 --- a/server/src/test/java/org/opensearch/common/rounding/RoundingDuelTests.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors 
require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.rounding; - -import org.opensearch.common.unit.TimeValue; -import org.opensearch.test.OpenSearchTestCase; -import org.joda.time.DateTimeZone; - -import java.time.ZoneOffset; - -import static org.hamcrest.Matchers.is; - -public class RoundingDuelTests extends OpenSearchTestCase { - - // dont include nano/micro seconds as rounding would become zero then and throw an exception - private static final String[] ALLOWED_TIME_SUFFIXES = new String[] { "d", "h", "ms", "s", "m" }; - - public void testDuellingImplementations() { - org.opensearch.common.Rounding.DateTimeUnit randomDateTimeUnit = randomFrom(org.opensearch.common.Rounding.DateTimeUnit.values()); - org.opensearch.common.Rounding.Prepared rounding; - Rounding roundingJoda; - - if (randomBoolean()) { - rounding = org.opensearch.common.Rounding.builder(randomDateTimeUnit).timeZone(ZoneOffset.UTC).build().prepareForUnknown(); - DateTimeUnit dateTimeUnit = DateTimeUnit.resolve(randomDateTimeUnit.getId()); - roundingJoda = Rounding.builder(dateTimeUnit).timeZone(DateTimeZone.UTC).build(); - } else { - TimeValue interval = timeValue(); - rounding = org.opensearch.common.Rounding.builder(interval).timeZone(ZoneOffset.UTC).build().prepareForUnknown(); - roundingJoda = Rounding.builder(interval).timeZone(DateTimeZone.UTC).build(); - } - - long roundValue = randomLong(); - assertThat(roundingJoda.round(roundValue), is(rounding.round(roundValue))); - } - - static TimeValue timeValue() { - return TimeValue.parseTimeValue(randomIntBetween(1, 1000) + randomFrom(ALLOWED_TIME_SUFFIXES), "settingName"); - } -} diff --git a/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java b/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java deleted file mode 100644 index d1b3adcd55f0c..0000000000000 --- a/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java +++ /dev/null @@ -1,822 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.rounding; - -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.rounding.Rounding.TimeIntervalRounding; -import org.opensearch.common.rounding.Rounding.TimeUnitRounding; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.test.OpenSearchTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeConstants; -import org.joda.time.DateTimeZone; -import org.joda.time.format.DateTimeFormat; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.ISODateTimeFormat; -import org.hamcrest.Description; -import org.hamcrest.Matcher; -import org.hamcrest.TypeSafeMatcher; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.startsWith; - -public class TimeZoneRoundingTests extends OpenSearchTestCase { - - public void testUTCTimeUnitRounding() { - Rounding tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).build(); - DateTimeZone tz = DateTimeZone.UTC; - assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-01T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-01T00:00:00.000Z")), isDate(time("2009-03-01T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-09T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-16T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.QUARTER).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-01T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-04-01T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T01:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-09T01:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-10T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).build(); - 
assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-01T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2013-01-01T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.MINUTES_OF_HOUR).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T01:01:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-09T00:01:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.SECOND_OF_MINUTE).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T01:01:01.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-09T00:00:01.000Z"), tz)); - } - - public void testUTCIntervalRounding() { - Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(12)).build(); - DateTimeZone tz = DateTimeZone.UTC; - assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T00:00:00.000Z")), isDate(time("2009-02-03T12:00:00.000Z"), tz)); - assertThat(tzRounding.round(time("2009-02-03T13:01:01")), isDate(time("2009-02-03T12:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T12:00:00.000Z")), isDate(time("2009-02-04T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(TimeValue.timeValueHours(48)).build(); - assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T00:00:00.000Z")), isDate(time("2009-02-05T00:00:00.000Z"), tz)); - assertThat(tzRounding.round(time("2009-02-05T13:01:01")), isDate(time("2009-02-05T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-05T00:00:00.000Z")), isDate(time("2009-02-07T00:00:00.000Z"), tz)); - } - - /** - * test TimeIntervalRounding, (interval < 12h) with time zone shift - */ - public void testTimeIntervalRounding() { - DateTimeZone tz = DateTimeZone.forOffsetHours(-1); - Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(6)).timeZone(tz).build(); - assertThat(tzRounding.round(time("2009-02-03T00:01:01")), isDate(time("2009-02-02T19:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-02T19:00:00.000Z")), isDate(time("2009-02-03T01:00:00.000Z"), tz)); - - assertThat(tzRounding.round(time("2009-02-03T13:01:01")), isDate(time("2009-02-03T13:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T13:00:00.000Z")), isDate(time("2009-02-03T19:00:00.000Z"), tz)); - } - - /** - * test DayIntervalRounding, (interval >= 12h) with time zone shift - */ - public void testDayIntervalRounding() { - DateTimeZone tz = DateTimeZone.forOffsetHours(-8); - Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(12)).timeZone(tz).build(); - assertThat(tzRounding.round(time("2009-02-03T00:01:01")), isDate(time("2009-02-02T20:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-02T20:00:00.000Z")), isDate(time("2009-02-03T08:00:00.000Z"), tz)); - - assertThat(tzRounding.round(time("2009-02-03T13:01:01")), isDate(time("2009-02-03T08:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T08:00:00.000Z")), isDate(time("2009-02-03T20:00:00.000Z"), tz)); - } - - public void testDayRounding() { - int timezoneOffset = -2; - Rounding 
tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.forOffsetHours(timezoneOffset)).build(); - assertThat(tzRounding.round(0), equalTo(0L - TimeValue.timeValueHours(24 + timezoneOffset).millis())); - assertThat( - tzRounding.nextRoundingValue(0L - TimeValue.timeValueHours(24 + timezoneOffset).millis()), - equalTo(TimeValue.timeValueHours(-timezoneOffset).millis()) - ); - - DateTimeZone tz = DateTimeZone.forID("-08:00"); - tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); - assertThat(tzRounding.round(time("2012-04-01T04:15:30Z")), isDate(time("2012-03-31T08:00:00Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build(); - assertThat(tzRounding.round(time("2012-04-01T04:15:30Z")), equalTo(time("2012-03-01T08:00:00Z"))); - - // date in Feb-3rd, but still in Feb-2nd in -02:00 timezone - tz = DateTimeZone.forID("-02:00"); - tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); - assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-02T02:00:00"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-02T02:00:00")), isDate(time("2009-02-03T02:00:00"), tz)); - - // date in Feb-3rd, also in -02:00 timezone - tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); - assertThat(tzRounding.round(time("2009-02-03T02:01:01")), isDate(time("2009-02-03T02:00:00"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T02:00:00")), isDate(time("2009-02-04T02:00:00"), tz)); - } - - public void testTimeRounding() { - // hour unit - DateTimeZone tz = DateTimeZone.forOffsetHours(-2); - Rounding tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build(); - assertThat(tzRounding.round(0), equalTo(0L)); - assertThat(tzRounding.nextRoundingValue(0L), equalTo(TimeValue.timeValueHours(1L).getMillis())); - - assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T01:00:00"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T01:00:00")), isDate(time("2009-02-03T02:00:00"), tz)); - } - - public void testTimeUnitRoundingDST() { - Rounding tzRounding; - // testing savings to non savings switch - DateTimeZone cet = DateTimeZone.forID("CET"); - tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build(); - assertThat(tzRounding.round(time("2014-10-26T01:01:01", cet)), isDate(time("2014-10-26T01:00:00+02:00"), cet)); - assertThat(tzRounding.nextRoundingValue(time("2014-10-26T01:00:00", cet)), isDate(time("2014-10-26T02:00:00+02:00"), cet)); - assertThat(tzRounding.nextRoundingValue(time("2014-10-26T02:00:00", cet)), isDate(time("2014-10-26T02:00:00+01:00"), cet)); - - // testing non savings to savings switch - tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build(); - assertThat(tzRounding.round(time("2014-03-30T01:01:01", cet)), isDate(time("2014-03-30T01:00:00+01:00"), cet)); - assertThat(tzRounding.nextRoundingValue(time("2014-03-30T01:00:00", cet)), isDate(time("2014-03-30T03:00:00", cet), cet)); - assertThat(tzRounding.nextRoundingValue(time("2014-03-30T03:00:00", cet)), isDate(time("2014-03-30T04:00:00", cet), cet)); - - // testing non savings to savings switch (America/Chicago) - DateTimeZone chg = DateTimeZone.forID("America/Chicago"); - Rounding tzRounding_utc = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.UTC).build(); - assertThat(tzRounding.round(time("2014-03-09T03:01:01", chg)), 
isDate(time("2014-03-09T03:00:00", chg), chg)); - - Rounding tzRounding_chg = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(chg).build(); - assertThat(tzRounding_chg.round(time("2014-03-09T03:01:01", chg)), isDate(time("2014-03-09T03:00:00", chg), chg)); - - // testing savings to non savings switch 2013 (America/Chicago) - assertThat(tzRounding_utc.round(time("2013-11-03T06:01:01", chg)), isDate(time("2013-11-03T06:00:00", chg), chg)); - assertThat(tzRounding_chg.round(time("2013-11-03T06:01:01", chg)), isDate(time("2013-11-03T06:00:00", chg), chg)); - - // testing savings to non savings switch 2014 (America/Chicago) - assertThat(tzRounding_utc.round(time("2014-11-02T06:01:01", chg)), isDate(time("2014-11-02T06:00:00", chg), chg)); - assertThat(tzRounding_chg.round(time("2014-11-02T06:01:01", chg)), isDate(time("2014-11-02T06:00:00", chg), chg)); - } - - /** - * Randomized test on TimeUnitRounding. Test uses random - * {@link DateTimeUnit} and {@link DateTimeZone} and often (50% of the time) - * chooses test dates that are exactly on or close to offset changes (e.g. - * DST) in the chosen time zone. - *
- * It rounds the test date down and up and performs various checks on the - * rounding unit interval that is defined by this. Assumptions tested are - * described in - * {@link #assertInterval(long, long, long, Rounding, DateTimeZone)} - */ - public void testRoundingRandom() { - for (int i = 0; i < 1000; ++i) { - DateTimeUnit timeUnit = randomTimeUnit(); - DateTimeZone tz = randomDateTimeZone(); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - long date = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00 - long unitMillis = timeUnit.field(tz).getDurationField().getUnitMillis(); - if (randomBoolean()) { - nastyDate(date, tz, unitMillis); - } - final long roundedDate = rounding.round(date); - final long nextRoundingValue = rounding.nextRoundingValue(roundedDate); - - assertInterval(roundedDate, date, nextRoundingValue, rounding, tz); - - // check correct unit interval width for units smaller than a day, they should be fixed size except for transitions - if (unitMillis <= DateTimeConstants.MILLIS_PER_DAY) { - // if the interval defined didn't cross timezone offset transition, it should cover unitMillis width - if (tz.getOffset(roundedDate - 1) == tz.getOffset(nextRoundingValue + 1)) { - assertThat( - "unit interval width not as expected for [" + timeUnit + "], [" + tz + "] at " + new DateTime(roundedDate), - nextRoundingValue - roundedDate, - equalTo(unitMillis) - ); - } - } - } - } - - /** - * To be even more nasty, go to a transition in the selected time zone. - * In one third of the cases stay there, otherwise go half a unit back or forth - */ - private static long nastyDate(long initialDate, DateTimeZone timezone, long unitMillis) { - long date = timezone.nextTransition(initialDate); - if (randomBoolean()) { - return date + (randomLong() % unitMillis); // positive and negative offset possible - } else { - return date; - } - } - - /** - * test DST end with interval rounding - * CET: 25 October 2015, 03:00:00 clocks were turned backward 1 hour to 25 October 2015, 02:00:00 local standard time - */ - public void testTimeIntervalCET_DST_End() { - long interval = TimeUnit.MINUTES.toMillis(20); - DateTimeZone tz = DateTimeZone.forID("CET"); - Rounding rounding = new TimeIntervalRounding(interval, tz); - - assertThat(rounding.round(time("2015-10-25T01:55:00+02:00")), isDate(time("2015-10-25T01:40:00+02:00"), tz)); - assertThat(rounding.round(time("2015-10-25T02:15:00+02:00")), isDate(time("2015-10-25T02:00:00+02:00"), tz)); - assertThat(rounding.round(time("2015-10-25T02:35:00+02:00")), isDate(time("2015-10-25T02:20:00+02:00"), tz)); - assertThat(rounding.round(time("2015-10-25T02:55:00+02:00")), isDate(time("2015-10-25T02:40:00+02:00"), tz)); - // after DST shift - assertThat(rounding.round(time("2015-10-25T02:15:00+01:00")), isDate(time("2015-10-25T02:00:00+01:00"), tz)); - assertThat(rounding.round(time("2015-10-25T02:35:00+01:00")), isDate(time("2015-10-25T02:20:00+01:00"), tz)); - assertThat(rounding.round(time("2015-10-25T02:55:00+01:00")), isDate(time("2015-10-25T02:40:00+01:00"), tz)); - assertThat(rounding.round(time("2015-10-25T03:15:00+01:00")), isDate(time("2015-10-25T03:00:00+01:00"), tz)); - } - - /** - * test DST start with interval rounding - * CET: 27 March 2016, 02:00:00 clocks were turned forward 1 hour to 27 March 2016, 03:00:00 local daylight time - */ - public void testTimeIntervalCET_DST_Start() { - long interval = TimeUnit.MINUTES.toMillis(20); - DateTimeZone tz = DateTimeZone.forID("CET"); - Rounding 
rounding = new TimeIntervalRounding(interval, tz); -        // test DST start -        assertThat(rounding.round(time("2016-03-27T01:55:00+01:00")), isDate(time("2016-03-27T01:40:00+01:00"), tz)); -        assertThat(rounding.round(time("2016-03-27T02:00:00+01:00")), isDate(time("2016-03-27T03:00:00+02:00"), tz)); -        assertThat(rounding.round(time("2016-03-27T03:15:00+02:00")), isDate(time("2016-03-27T03:00:00+02:00"), tz)); -        assertThat(rounding.round(time("2016-03-27T03:35:00+02:00")), isDate(time("2016-03-27T03:20:00+02:00"), tz)); -    } - -    /** -     * test DST start with an offset not fitting the interval, e.g. Asia/Kathmandu: -     * adding 15min at 1986-01-01T00:00:00 causes the interval from -     * 1986-01-01T00:15:00+05:45 to 1986-01-01T00:20:00+05:45 to only be 5min -     * long -     */ -    public void testTimeInterval_Kathmandu_DST_Start() { -        long interval = TimeUnit.MINUTES.toMillis(20); -        DateTimeZone tz = DateTimeZone.forID("Asia/Kathmandu"); -        Rounding rounding = new TimeIntervalRounding(interval, tz); -        assertThat(rounding.round(time("1985-12-31T23:55:00+05:30")), isDate(time("1985-12-31T23:40:00+05:30"), tz)); -        assertThat(rounding.round(time("1986-01-01T00:16:00+05:45")), isDate(time("1986-01-01T00:15:00+05:45"), tz)); -        assertThat(time("1986-01-01T00:15:00+05:45") - time("1985-12-31T23:40:00+05:30"), equalTo(TimeUnit.MINUTES.toMillis(20))); -        assertThat(rounding.round(time("1986-01-01T00:26:00+05:45")), isDate(time("1986-01-01T00:20:00+05:45"), tz)); -        assertThat(time("1986-01-01T00:20:00+05:45") - time("1986-01-01T00:15:00+05:45"), equalTo(TimeUnit.MINUTES.toMillis(5))); -        assertThat(rounding.round(time("1986-01-01T00:46:00+05:45")), isDate(time("1986-01-01T00:40:00+05:45"), tz)); -        assertThat(time("1986-01-01T00:40:00+05:45") - time("1986-01-01T00:20:00+05:45"), equalTo(TimeUnit.MINUTES.toMillis(20))); -    } - -    /** -     * Special test for intervals that don't fit evenly into the rounding interval. -     * In this case, when the interval crosses a DST transition point, rounding in local -     * time can land in a DST gap, which results in wrong UTC rounding values. -     */ -    public void testIntervalRounding_NotDivisibleInteval() { -        DateTimeZone tz = DateTimeZone.forID("CET"); -        long interval = TimeUnit.MINUTES.toMillis(14); -        Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); - -        assertThat(rounding.round(time("2016-03-27T01:41:00+01:00")), isDate(time("2016-03-27T01:30:00+01:00"), tz)); -        assertThat(rounding.round(time("2016-03-27T01:51:00+01:00")), isDate(time("2016-03-27T01:44:00+01:00"), tz)); -        assertThat(rounding.round(time("2016-03-27T01:59:00+01:00")), isDate(time("2016-03-27T01:58:00+01:00"), tz)); -        assertThat(rounding.round(time("2016-03-27T03:05:00+02:00")), isDate(time("2016-03-27T03:00:00+02:00"), tz)); -        assertThat(rounding.round(time("2016-03-27T03:12:00+02:00")), isDate(time("2016-03-27T03:08:00+02:00"), tz)); -        assertThat(rounding.round(time("2016-03-27T03:25:00+02:00")), isDate(time("2016-03-27T03:22:00+02:00"), tz)); -        assertThat(rounding.round(time("2016-03-27T03:39:00+02:00")), isDate(time("2016-03-27T03:36:00+02:00"), tz)); -    } - -    /** -     * Test for half-day rounding intervals crossing DST.
- */ - public void testIntervalRounding_HalfDay_DST() { - DateTimeZone tz = DateTimeZone.forID("CET"); - long interval = TimeUnit.HOURS.toMillis(12); - Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); - - assertThat(rounding.round(time("2016-03-26T01:00:00+01:00")), isDate(time("2016-03-26T00:00:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-26T13:00:00+01:00")), isDate(time("2016-03-26T12:00:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-27T01:00:00+01:00")), isDate(time("2016-03-27T00:00:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-27T13:00:00+02:00")), isDate(time("2016-03-27T12:00:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-28T01:00:00+02:00")), isDate(time("2016-03-28T00:00:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-28T13:00:00+02:00")), isDate(time("2016-03-28T12:00:00+02:00"), tz)); - } - - /** - * randomized test on {@link TimeIntervalRounding} with random interval and time zone offsets - */ - public void testIntervalRoundingRandom() { - for (int i = 0; i < 1000; i++) { - TimeUnit unit = randomFrom(new TimeUnit[] { TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS }); - long interval = unit.toMillis(randomIntBetween(1, 365)); - DateTimeZone tz = randomDateTimeZone(); - Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); - long mainDate = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00 - if (randomBoolean()) { - mainDate = nastyDate(mainDate, tz, interval); - } - // check two intervals around date - long previousRoundedValue = Long.MIN_VALUE; - for (long date = mainDate - 2 * interval; date < mainDate + 2 * interval; date += interval / 2) { - try { - final long roundedDate = rounding.round(date); - final long nextRoundingValue = rounding.nextRoundingValue(roundedDate); - assertThat("Rounding should be idempotent", roundedDate, equalTo(rounding.round(roundedDate))); - assertThat("Rounded value smaller or equal than unrounded", roundedDate, lessThanOrEqualTo(date)); - assertThat( - "Values smaller than rounded value should round further down", - rounding.round(roundedDate - 1), - lessThan(roundedDate) - ); - assertThat("Rounding should be >= previous rounding value", roundedDate, greaterThanOrEqualTo(previousRoundedValue)); - - if (tz.isFixed()) { - assertThat("NextRounding value should be greater than date", nextRoundingValue, greaterThan(roundedDate)); - assertThat( - "NextRounding value should be interval from rounded value", - nextRoundingValue - roundedDate, - equalTo(interval) - ); - assertThat( - "NextRounding value should be a rounded date", - nextRoundingValue, - equalTo(rounding.round(nextRoundingValue)) - ); - } - previousRoundedValue = roundedDate; - } catch (AssertionError e) { - logger.error("Rounding error at {}, timezone {}, interval: {},", new DateTime(date, tz), tz, interval); - throw e; - } - } - } - } - - /** - * Test that rounded values are always greater or equal to last rounded value if date is increasing. 
- * The example covers an interval around 2011-10-30T02:10:00+01:00, time zone CET, interval: 2700000ms - */ - public void testIntervalRoundingMonotonic_CET() { - long interval = TimeUnit.MINUTES.toMillis(45); - DateTimeZone tz = DateTimeZone.forID("CET"); - Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); - List> expectedDates = new ArrayList<>(); - // first date is the date to be rounded, second the expected result - expectedDates.add(new Tuple<>("2011-10-30T01:40:00.000+02:00", "2011-10-30T01:30:00.000+02:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:02:30.000+02:00", "2011-10-30T01:30:00.000+02:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:25:00.000+02:00", "2011-10-30T02:15:00.000+02:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:47:30.000+02:00", "2011-10-30T02:15:00.000+02:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:10:00.000+01:00", "2011-10-30T02:15:00.000+02:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:32:30.000+01:00", "2011-10-30T02:15:00.000+01:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:55:00.000+01:00", "2011-10-30T02:15:00.000+01:00")); - expectedDates.add(new Tuple<>("2011-10-30T03:17:30.000+01:00", "2011-10-30T03:00:00.000+01:00")); - - long previousDate = Long.MIN_VALUE; - for (Tuple dates : expectedDates) { - final long roundedDate = rounding.round(time(dates.v1())); - assertThat(roundedDate, isDate(time(dates.v2()), tz)); - assertThat(roundedDate, greaterThanOrEqualTo(previousDate)); - previousDate = roundedDate; - } - // here's what this means for interval widths - assertEquals(TimeUnit.MINUTES.toMillis(45), time("2011-10-30T02:15:00.000+02:00") - time("2011-10-30T01:30:00.000+02:00")); - assertEquals(TimeUnit.MINUTES.toMillis(60), time("2011-10-30T02:15:00.000+01:00") - time("2011-10-30T02:15:00.000+02:00")); - assertEquals(TimeUnit.MINUTES.toMillis(45), time("2011-10-30T03:00:00.000+01:00") - time("2011-10-30T02:15:00.000+01:00")); - } - - /** - * special test for DST switch from #9491 - */ - public void testAmbiguousHoursAfterDSTSwitch() { - Rounding tzRounding; - final DateTimeZone tz = DateTimeZone.forID("Asia/Jerusalem"); - tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build(); - assertThat(tzRounding.round(time("2014-10-26T00:30:00+03:00")), isDate(time("2014-10-26T00:00:00+03:00"), tz)); - assertThat(tzRounding.round(time("2014-10-26T01:30:00+03:00")), isDate(time("2014-10-26T01:00:00+03:00"), tz)); - // the utc date for "2014-10-25T03:00:00+03:00" and "2014-10-25T03:00:00+02:00" is the same, local time turns back 1h here - assertThat(time("2014-10-26T03:00:00+03:00"), isDate(time("2014-10-26T02:00:00+02:00"), tz)); - assertThat(tzRounding.round(time("2014-10-26T01:30:00+02:00")), isDate(time("2014-10-26T01:00:00+02:00"), tz)); - assertThat(tzRounding.round(time("2014-10-26T02:30:00+02:00")), isDate(time("2014-10-26T02:00:00+02:00"), tz)); - - // Day interval - tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); - assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-11-11T00:00:00", tz), tz)); - // DST on - assertThat(tzRounding.round(time("2014-08-11T17:00:00", tz)), isDate(time("2014-08-11T00:00:00", tz), tz)); - // Day of switching DST on -> off - assertThat(tzRounding.round(time("2014-10-26T17:00:00", tz)), isDate(time("2014-10-26T00:00:00", tz), tz)); - // Day of switching DST off -> on - assertThat(tzRounding.round(time("2015-03-27T17:00:00", tz)), isDate(time("2015-03-27T00:00:00", tz), tz)); - - // Month 
interval - tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build(); - assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-11-01T00:00:00", tz), tz)); - // DST on - assertThat(tzRounding.round(time("2014-10-10T17:00:00", tz)), isDate(time("2014-10-01T00:00:00", tz), tz)); - - // Year interval - tzRounding = Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build(); - assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-01-01T00:00:00", tz), tz)); - - // Two timestamps in same year and different timezone offset ("Double buckets" issue - #9491) - tzRounding = Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build(); - assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(tzRounding.round(time("2014-08-11T17:00:00", tz)), tz)); - } - - /** - * test for #10025, strict local to UTC conversion can cause joda exceptions - * on DST start - */ - public void testLenientConversionDST() { - DateTimeZone tz = DateTimeZone.forID("America/Sao_Paulo"); - long start = time("2014-10-18T20:50:00.000", tz); - long end = time("2014-10-19T01:00:00.000", tz); - Rounding tzRounding = new Rounding.TimeUnitRounding(DateTimeUnit.MINUTES_OF_HOUR, tz); - Rounding dayTzRounding = new Rounding.TimeIntervalRounding(60000, tz); - for (long time = start; time < end; time = time + 60000) { - assertThat(tzRounding.nextRoundingValue(time), greaterThan(time)); - assertThat(dayTzRounding.nextRoundingValue(time), greaterThan(time)); - } - } - - public void testEdgeCasesTransition() { - { - // standard +/-1 hour DST transition, CET - DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; - DateTimeZone tz = DateTimeZone.forID("CET"); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - // 29 Mar 2015 - Daylight Saving Time Started - // at 02:00:00 clocks were turned forward 1 hour to 03:00:00 - assertInterval(time("2015-03-29T00:00:00.000+01:00"), time("2015-03-29T01:00:00.000+01:00"), rounding, 60, tz); - assertInterval(time("2015-03-29T01:00:00.000+01:00"), time("2015-03-29T03:00:00.000+02:00"), rounding, 60, tz); - assertInterval(time("2015-03-29T03:00:00.000+02:00"), time("2015-03-29T04:00:00.000+02:00"), rounding, 60, tz); - - // 25 Oct 2015 - Daylight Saving Time Ended - // at 03:00:00 clocks were turned backward 1 hour to 02:00:00 - assertInterval(time("2015-10-25T01:00:00.000+02:00"), time("2015-10-25T02:00:00.000+02:00"), rounding, 60, tz); - assertInterval(time("2015-10-25T02:00:00.000+02:00"), time("2015-10-25T02:00:00.000+01:00"), rounding, 60, tz); - assertInterval(time("2015-10-25T02:00:00.000+01:00"), time("2015-10-25T03:00:00.000+01:00"), rounding, 60, tz); - } - - { - // time zone "Asia/Kathmandu" - // 1 Jan 1986 - Time Zone Change (IST → NPT), at 00:00:00 clocks were turned forward 00:15 minutes - // - // hour rounding is stable before 1985-12-31T23:00:00.000 and after 1986-01-01T01:00:00.000+05:45 - // the interval between is 105 minutes long because the hour after transition starts at 00:15 - // which is not a round value for hourly rounding - DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; - DateTimeZone tz = DateTimeZone.forID("Asia/Kathmandu"); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - assertInterval(time("1985-12-31T22:00:00.000+05:30"), time("1985-12-31T23:00:00.000+05:30"), rounding, 60, tz); - assertInterval(time("1985-12-31T23:00:00.000+05:30"), time("1986-01-01T01:00:00.000+05:45"), rounding, 105, tz); - 
assertInterval(time("1986-01-01T01:00:00.000+05:45"), time("1986-01-01T02:00:00.000+05:45"), rounding, 60, tz); - } - - { - // time zone "Australia/Lord_Howe" - // 3 Mar 1991 - Daylight Saving Time Ended - // at 02:00:00 clocks were turned backward 0:30 hours to Sunday, 3 March 1991, 01:30:00 - DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; - DateTimeZone tz = DateTimeZone.forID("Australia/Lord_Howe"); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - assertInterval(time("1991-03-03T00:00:00.000+11:00"), time("1991-03-03T01:00:00.000+11:00"), rounding, 60, tz); - assertInterval(time("1991-03-03T01:00:00.000+11:00"), time("1991-03-03T02:00:00.000+10:30"), rounding, 90, tz); - assertInterval(time("1991-03-03T02:00:00.000+10:30"), time("1991-03-03T03:00:00.000+10:30"), rounding, 60, tz); - - // 27 Oct 1991 - Daylight Saving Time Started - // at 02:00:00 clocks were turned forward 0:30 hours to 02:30:00 - assertInterval(time("1991-10-27T00:00:00.000+10:30"), time("1991-10-27T01:00:00.000+10:30"), rounding, 60, tz); - // the interval containing the switch time is 90 minutes long - assertInterval(time("1991-10-27T01:00:00.000+10:30"), time("1991-10-27T03:00:00.000+11:00"), rounding, 90, tz); - assertInterval(time("1991-10-27T03:00:00.000+11:00"), time("1991-10-27T04:00:00.000+11:00"), rounding, 60, tz); - } - - { - // time zone "Pacific/Chatham" - // 5 Apr 2015 - Daylight Saving Time Ended - // at 03:45:00 clocks were turned backward 1 hour to 02:45:00 - DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; - DateTimeZone tz = DateTimeZone.forID("Pacific/Chatham"); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - assertInterval(time("2015-04-05T02:00:00.000+13:45"), time("2015-04-05T03:00:00.000+13:45"), rounding, 60, tz); - assertInterval(time("2015-04-05T03:00:00.000+13:45"), time("2015-04-05T03:00:00.000+12:45"), rounding, 60, tz); - assertInterval(time("2015-04-05T03:00:00.000+12:45"), time("2015-04-05T04:00:00.000+12:45"), rounding, 60, tz); - - // 27 Sep 2015 - Daylight Saving Time Started - // at 02:45:00 clocks were turned forward 1 hour to 03:45:00 - - assertInterval(time("2015-09-27T01:00:00.000+12:45"), time("2015-09-27T02:00:00.000+12:45"), rounding, 60, tz); - assertInterval(time("2015-09-27T02:00:00.000+12:45"), time("2015-09-27T04:00:00.000+13:45"), rounding, 60, tz); - assertInterval(time("2015-09-27T04:00:00.000+13:45"), time("2015-09-27T05:00:00.000+13:45"), rounding, 60, tz); - } - } - - public void testDST_Europe_Rome() { - // time zone "Europe/Rome", rounding to days. Rome had two midnights on the day the clocks went back in 1978, and - // timeZone.convertLocalToUTC() gives the later of the two because Rome is east of UTC, whereas we want the earlier. 
- - DateTimeUnit timeUnit = DateTimeUnit.DAY_OF_MONTH; - DateTimeZone tz = DateTimeZone.forID("Europe/Rome"); - Rounding rounding = new TimeUnitRounding(timeUnit, tz); - - { - long timeBeforeFirstMidnight = time("1978-09-30T23:59:00+02:00"); - long floor = rounding.round(timeBeforeFirstMidnight); - assertThat(floor, isDate(time("1978-09-30T00:00:00+02:00"), tz)); - } - - { - long timeBetweenMidnights = time("1978-10-01T00:30:00+02:00"); - long floor = rounding.round(timeBetweenMidnights); - assertThat(floor, isDate(time("1978-10-01T00:00:00+02:00"), tz)); - } - - { - long timeAfterSecondMidnight = time("1978-10-01T00:30:00+01:00"); - long floor = rounding.round(timeAfterSecondMidnight); - assertThat(floor, isDate(time("1978-10-01T00:00:00+02:00"), tz)); - - long prevFloor = rounding.round(floor - 1); - assertThat(prevFloor, lessThan(floor)); - assertThat(prevFloor, isDate(time("1978-09-30T00:00:00+02:00"), tz)); - } - } - - /** - * Test for a time zone whose days overlap because the clocks are set back across midnight at the end of DST. - */ - public void testDST_America_St_Johns() { - // time zone "America/St_Johns", rounding to days. - DateTimeUnit timeUnit = DateTimeUnit.DAY_OF_MONTH; - DateTimeZone tz = DateTimeZone.forID("America/St_Johns"); - Rounding rounding = new TimeUnitRounding(timeUnit, tz); - - // 29 October 2006 - Daylight Saving Time ended, changing the UTC offset from -02:30 to -03:30. - // This happened at 02:31 UTC, 00:01 local time, so the clocks were set back 1 hour to 23:01 on the 28th. - // This means that 2006-10-29 has _two_ midnights, one in the -02:30 offset and one in the -03:30 offset. - // Only the first of these is considered "rounded". Moreover, the extra time between 23:01 and 23:59 - // should be considered as part of the 28th even though it comes after midnight on the 29th. - - { - // Times before the first midnight should be rounded up to the first midnight. - long timeBeforeFirstMidnight = time("2006-10-28T23:30:00.000-02:30"); - long floor = rounding.round(timeBeforeFirstMidnight); - assertThat(floor, isDate(time("2006-10-28T00:00:00.000-02:30"), tz)); - long ceiling = rounding.nextRoundingValue(timeBeforeFirstMidnight); - assertThat(ceiling, isDate(time("2006-10-29T00:00:00.000-02:30"), tz)); - assertInterval(floor, timeBeforeFirstMidnight, ceiling, rounding, tz); - } - - { - // Times between the two midnights which are on the later day should be rounded down to the later day's midnight. - long timeBetweenMidnights = time("2006-10-29T00:00:30.000-02:30"); - // (this is halfway through the last minute before the clocks changed, in which local time was ambiguous) - - long floor = rounding.round(timeBetweenMidnights); - assertThat(floor, isDate(time("2006-10-29T00:00:00.000-02:30"), tz)); - - long ceiling = rounding.nextRoundingValue(timeBetweenMidnights); - assertThat(ceiling, isDate(time("2006-10-30T00:00:00.000-03:30"), tz)); - - assertInterval(floor, timeBetweenMidnights, ceiling, rounding, tz); - } - - { - // Times between the two midnights which are on the earlier day should be rounded down to the earlier day's midnight. 
- long timeBetweenMidnights = time("2006-10-28T23:30:00.000-03:30"); -            // (this is halfway through the hour after the clocks changed, in which local time was ambiguous) - -            long floor = rounding.round(timeBetweenMidnights); -            assertThat(floor, isDate(time("2006-10-28T00:00:00.000-02:30"), tz)); - -            long ceiling = rounding.nextRoundingValue(timeBetweenMidnights); -            assertThat(ceiling, isDate(time("2006-10-29T00:00:00.000-02:30"), tz)); - -            assertInterval(floor, timeBetweenMidnights, ceiling, rounding, tz); -        } - -        { -            // Times after the second midnight should be rounded down to the first midnight. -            long timeAfterSecondMidnight = time("2006-10-29T06:00:00.000-03:30"); -            long floor = rounding.round(timeAfterSecondMidnight); -            assertThat(floor, isDate(time("2006-10-29T00:00:00.000-02:30"), tz)); -            long ceiling = rounding.nextRoundingValue(timeAfterSecondMidnight); -            assertThat(ceiling, isDate(time("2006-10-30T00:00:00.000-03:30"), tz)); -            assertInterval(floor, timeAfterSecondMidnight, ceiling, rounding, tz); -        } -    } - -    /** -     * Tests for DST transitions with overlaps and day roundings. -     */ -    public void testDST_END_Edgecases() { -        // First case: the DST transition happens at 1am local time, switching back one hour. -        // We want the overlapping hour to count for the next day, making it a 25h interval - -        DateTimeUnit timeUnit = DateTimeUnit.DAY_OF_MONTH; -        DateTimeZone tz = DateTimeZone.forID("Atlantic/Azores"); -        Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - -        // Sunday, 29 October 2000, 01:00:00 clocks were turned backward 1 hour -        // to Sunday, 29 October 2000, 00:00:00 local standard time instead -        // which means there were two midnights that day. - -        long midnightBeforeTransition = time("2000-10-29T00:00:00", tz); -        long midnightOfTransition = time("2000-10-29T00:00:00-01:00"); -        assertEquals(60L * 60L * 1000L, midnightOfTransition - midnightBeforeTransition); -        long nextMidnight = time("2000-10-30T00:00:00", tz); - -        assertInterval(midnightBeforeTransition, nextMidnight, rounding, 25 * 60, tz); - -        assertThat(rounding.round(time("2000-10-29T06:00:00-01:00")), isDate(time("2000-10-29T00:00:00Z"), tz)); - -        // Second case: the DST transition happens at midnight local time, switching back one hour to 23:00 local time. -        // We want the overlapping hour to count for the previous day here - -        tz = DateTimeZone.forID("America/Lima"); -        rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - -        // Sunday, 1 April 1990, 00:00:00 clocks were turned backward 1 hour to -        // Saturday, 31 March 1990, 23:00:00 local standard time instead - -        midnightBeforeTransition = time("1990-03-31T00:00:00.000-04:00"); -        nextMidnight = time("1990-04-01T00:00:00.000-05:00"); -        assertInterval(midnightBeforeTransition, nextMidnight, rounding, 25 * 60, tz); - -        // make sure the next interval is 24h long again -        long midnightAfterTransition = time("1990-04-01T00:00:00.000-05:00"); -        nextMidnight = time("1990-04-02T00:00:00.000-05:00"); -        assertInterval(midnightAfterTransition, nextMidnight, rounding, 24 * 60, tz); -    } - -    /** -     * Test that time zones are correctly parsed. There is a bug with -     * Joda 2.9.4 (see https://github.com/JodaOrg/joda-time/issues/373) -     */ -    public void testsTimeZoneParsing() { -        final DateTime expected = new DateTime(2016, 11, 10, 5, 37, 59, randomDateTimeZone()); - -        // Formatter used to print and parse the sample date.
- // Printing the date works but parsing it back fails -        // with Joda 2.9.4 -        DateTimeFormatter formatter = DateTimeFormat.forPattern("YYYY-MM-dd'T'HH:mm:ss " + randomFrom("ZZZ", "[ZZZ]", "'['ZZZ']'")); - -        String dateTimeAsString = formatter.print(expected); -        assertThat(dateTimeAsString, startsWith("2016-11-10T05:37:59 ")); - -        DateTime parsedDateTime = formatter.parseDateTime(dateTimeAsString); -        assertThat(parsedDateTime.getZone(), equalTo(expected.getZone())); -    } - -    private static void assertInterval(long rounded, long nextRoundingValue, Rounding rounding, int minutes, DateTimeZone tz) { -        assertInterval(rounded, dateBetween(rounded, nextRoundingValue), nextRoundingValue, rounding, tz); -        assertEquals(DateTimeConstants.MILLIS_PER_MINUTE * minutes, nextRoundingValue - rounded); -    } - -    /** -     * perform a number of assertions and checks on {@link TimeUnitRounding} intervals -     * @param rounded the expected low end of the rounding interval -     * @param unrounded a date in the interval to be checked for rounding -     * @param nextRoundingValue the expected upper end of the rounding interval -     * @param rounding the rounding instance -     */ -    private static void assertInterval(long rounded, long unrounded, long nextRoundingValue, Rounding rounding, DateTimeZone tz) { -        assertThat("rounding should be idempotent ", rounding.round(rounded), isDate(rounded, tz)); -        assertThat("rounded value smaller or equal than unrounded" + rounding, rounded, lessThanOrEqualTo(unrounded)); -        assertThat("values less than rounded should round further down" + rounding, rounding.round(rounded - 1), lessThan(rounded)); -        assertThat("nextRounding value should be a rounded date", rounding.round(nextRoundingValue), isDate(nextRoundingValue, tz)); -        assertThat( -            "values above nextRounding should round down there", -            rounding.round(nextRoundingValue + 1), -            isDate(nextRoundingValue, tz) -        ); - -        if (isTimeWithWellDefinedRounding(tz, unrounded)) { -            assertThat("nextRounding value should be greater than date" + rounding, nextRoundingValue, greaterThan(unrounded)); - -            long dateBetween = dateBetween(rounded, nextRoundingValue); -            assertThat( -                "dateBetween [" + new DateTime(dateBetween, tz) + "] should round down to roundedDate", -                rounding.round(dateBetween), -                isDate(rounded, tz) -            ); -            assertThat( -                "dateBetween [" + new DateTime(dateBetween, tz) + "] should round up to nextRoundingValue", -                rounding.nextRoundingValue(dateBetween), -                isDate(nextRoundingValue, tz) -            ); -        } -    } - -    private static boolean isTimeWithWellDefinedRounding(DateTimeZone tz, long t) { -        if (tz.getID().equals("America/St_Johns") -            || tz.getID().equals("America/Goose_Bay") -            || tz.getID().equals("America/Moncton") -            || tz.getID().equals("Canada/Newfoundland")) { - -            // Clocks went back at 00:01 between 1987 and 2010, causing overlapping days. -            // These timezones are otherwise uninteresting, so just skip this period. - -            return t <= time("1987-10-01T00:00:00Z") || t >= time("2010-12-01T00:00:00Z"); -        } - -        if (tz.getID().equals("Antarctica/Casey")) { - -            // Clocks went back 3 hours at 02:00 on 2010-03-05, causing overlapping days.
- - return t <= time("2010-03-03T00:00:00Z") || t >= time("2010-03-07T00:00:00Z"); - } - - return true; - } - - private static long dateBetween(long lower, long upper) { - long dateBetween = randomLongBetween(lower, upper - 1); - assert lower <= dateBetween && dateBetween < upper; - return dateBetween; - } - - private static DateTimeUnit randomTimeUnit() { - byte id = (byte) randomIntBetween(1, 8); - return DateTimeUnit.resolve(id); - } - - private static long time(String time) { - return time(time, DateTimeZone.UTC); - } - - private static long time(String time, DateTimeZone zone) { - return ISODateTimeFormat.dateOptionalTimeParser().withZone(zone).parseMillis(time); - } - - private static Matcher isDate(final long expected, DateTimeZone tz) { - return new TypeSafeMatcher() { - @Override - public boolean matchesSafely(final Long item) { - return expected == item.longValue(); - } - - @Override - public void describeTo(Description description) { - description.appendText(new DateTime(expected, tz) + " [" + expected + "] "); - } - - @Override - protected void describeMismatchSafely(final Long actual, final Description mismatchDescription) { - mismatchDescription.appendText(" was ").appendValue(new DateTime(actual, tz) + " [" + actual + "]"); - } - }; - } -} From fcbec24408147f9b55b71458996c1cdb6a42dfbd Mon Sep 17 00:00:00 2001 From: Siddhant Deshmukh Date: Fri, 27 Oct 2023 12:50:35 -0700 Subject: [PATCH 100/178] Change log level to trace (#10971) Signed-off-by: Siddhant Deshmukh --- .../org/opensearch/action/search/SearchQueryCategorizer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java index 9cbe2d2ffcb7d..8fe1be610f9af 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java @@ -75,7 +75,7 @@ private void logQueryShape(QueryBuilder topLevelQueryBuilder) { } QueryShapeVisitor shapeVisitor = new QueryShapeVisitor(); topLevelQueryBuilder.visit(shapeVisitor); - log.debug("Query shape : {}", shapeVisitor.prettyPrintTree(" ")); + log.trace("Query shape : {}", shapeVisitor.prettyPrintTree(" ")); } } From bc74731a537c461cd1d7666bc3be82fbe18e143d Mon Sep 17 00:00:00 2001 From: Neetika Singhal Date: Fri, 27 Oct 2023 16:41:26 -0700 Subject: [PATCH 101/178] update the indexRandom function to create more segments and update (#10247) IndicesRequestCacheIT, HighlighterSearcherIT to run with minimum of 2 slices Signed-off-by: Neetika Singhal --- CHANGELOG.md | 1 + .../indices/IndicesRequestCacheIT.java | 4 +- .../highlight/HighlighterSearchIT.java | 134 ++++++++++++++---- .../test/OpenSearchIntegTestCase.java | 51 +++++++ .../ParameterizedOpenSearchIntegTestCase.java | 8 ++ 5 files changed, 168 insertions(+), 30 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 234b08398f9ef..34fd573b295b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -95,6 +95,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote Store] Add repository stats for remote store([#10567](https://github.com/opensearch-project/OpenSearch/pull/10567)) - Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) - Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight 
([10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) +- Update the indexRandom function to create more segments for concurrent search tests ([10247](https://github.com/opensearch-project/OpenSearch/pull/10247)) - [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814)) - Added cluster setting cluster.restrict.index.replication_type to restrict setting of index setting replication type ([#10866](https://github.com/opensearch-project/OpenSearch/pull/10866)) - Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 98a22717019cf..848f6eddbb0df 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -539,7 +539,7 @@ public void testCanCache() throws Exception { assertCacheState(client, "index", 0, 4); } - public void testCacheWithFilteredAlias() { + public void testCacheWithFilteredAlias() throws InterruptedException { Client client = client(); Settings settings = Settings.builder() .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) @@ -562,6 +562,8 @@ public void testCacheWithFilteredAlias() { OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); refresh(); + indexRandomForConcurrentSearch("index"); + assertCacheState(client, "index", 0, 0); SearchResponse r1 = client.prepareSearch("index") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 42d91ac945662..f7bc5eb75ad0f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -155,7 +155,7 @@ protected Collection> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, MockKeywordPlugin.class, MockAnalysisPlugin.class); } - public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOException { + public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder(); mappings.startObject(); mappings.startObject("properties") @@ -177,6 +177,7 @@ public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOExceptio .setSource(jsonBuilder().startObject().array("tags", "foo baz", "foo baz", "foo baz", "foo bar").field("sort", 2).endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); for (BoundaryScannerType scanner : BoundaryScannerType.values()) { SearchResponse search = client().prepareSearch() @@ -190,12 +191,13 @@ public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOExceptio } } - public void testHighlightingWithStoredKeyword() throws IOException { + public void testHighlightingWithStoredKeyword() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder(); mappings.startObject(); mappings.startObject("properties").startObject("text").field("type", "keyword").field("store", 
true).endObject().endObject(); mappings.endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); + indexRandomForConcurrentSearch("test"); client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "foo").endObject()).get(); refresh(); SearchResponse search = client().prepareSearch() @@ -205,7 +207,7 @@ public void testHighlightingWithStoredKeyword() throws IOException { assertHighlight(search, 0, "text", 0, equalTo("foo")); } - public void testHighlightingWithWildcardName() throws IOException { + public void testHighlightingWithWildcardName() throws IOException, InterruptedException { // test the kibana case with * as fieldname that will try highlight all fields including meta fields XContentBuilder mappings = jsonBuilder(); mappings.startObject(); @@ -221,6 +223,7 @@ public void testHighlightingWithWildcardName() throws IOException { assertAcked(prepareCreate("test").setMapping(mappings)); client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "text").endObject()).get(); refresh(); + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { SearchResponse search = client().prepareSearch() .setQuery(constantScoreQuery(matchQuery("text", "text"))) @@ -230,7 +233,7 @@ public void testHighlightingWithWildcardName() throws IOException { } } - public void testFieldAlias() throws IOException { + public void testFieldAlias() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder().startObject() .startObject("properties") .startObject("text") @@ -248,7 +251,7 @@ public void testFieldAlias() throws IOException { client().prepareIndex("test").setId("1").setSource("text", "foo").get(); refresh(); - + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { HighlightBuilder builder = new HighlightBuilder().field(new Field("alias").highlighterType(type)) .requireFieldMatch(randomBoolean()); @@ -257,7 +260,7 @@ public void testFieldAlias() throws IOException { } } - public void testFieldAliasWithSourceLookup() throws IOException { + public void testFieldAliasWithSourceLookup() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder().startObject() .startObject("properties") .startObject("text") @@ -276,7 +279,7 @@ public void testFieldAliasWithSourceLookup() throws IOException { client().prepareIndex("test").setId("1").setSource("text", "foo bar").get(); refresh(); - + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { HighlightBuilder builder = new HighlightBuilder().field(new Field("alias").highlighterType(type)) .requireFieldMatch(randomBoolean()); @@ -285,7 +288,7 @@ public void testFieldAliasWithSourceLookup() throws IOException { } } - public void testFieldAliasWithWildcardField() throws IOException { + public void testFieldAliasWithWildcardField() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder().startObject() .startObject("properties") .startObject("keyword") @@ -301,13 +304,14 @@ public void testFieldAliasWithWildcardField() throws IOException { client().prepareIndex("test").setId("1").setSource("keyword", "foo").get(); refresh(); + indexRandomForConcurrentSearch("test"); HighlightBuilder builder = new HighlightBuilder().field(new Field("al*")).requireFieldMatch(false); SearchResponse search = client().prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder).get(); assertHighlight(search, 0, "alias", 0, equalTo("foo")); } - public void 
testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOException { + public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder(); mappings.startObject(); mappings.startObject("_source") @@ -334,6 +338,7 @@ public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOExc .setSource(jsonBuilder().startObject().field("unstored_text", "text").field("text", "text").endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { SearchResponse search = client().prepareSearch() .setQuery(constantScoreQuery(matchQuery("text", "text"))) @@ -350,7 +355,7 @@ public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOExc } // see #3486 - public void testHighTermFrequencyDoc() throws IOException { + public void testHighTermFrequencyDoc() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping("name", "type=text,term_vector=with_positions_offsets,store=" + randomBoolean())); StringBuilder builder = new StringBuilder(); for (int i = 0; i < 6000; i++) { @@ -358,6 +363,7 @@ public void testHighTermFrequencyDoc() throws IOException { } client().prepareIndex("test").setId("1").setSource("name", builder.toString()).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(constantScoreQuery(matchQuery("name", "abc"))) .highlighter(new HighlightBuilder().field("name")) @@ -385,6 +391,7 @@ public void testEnsureNoNegativeOffsets() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(matchQuery("long_term", "thisisaverylongwordandmakessurethisfails foo highlighed")) .highlighter(new HighlightBuilder().field("long_term", 18, 1).highlighterType("fvh")) @@ -671,7 +678,7 @@ public void testHighlightIssue1994() throws Exception { assertHighlight(search, 0, "titleTV", 1, 2, equalTo("highlight other text")); } - public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() { + public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() throws InterruptedException { createIndex("test"); ensureGreen(); @@ -684,6 +691,7 @@ public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1 and field2 produces different tags"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "test")) @@ -734,6 +742,7 @@ public void testHighlightingOnWildcardFields() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field*"); SearchSourceBuilder source = searchSource() @@ -783,6 +792,7 @@ public void testForceSourceWithSourceDisabled() throws Exception { .setSource("field1", "The quick brown fox jumps over the lazy dog", "field2", "second field content") .get(); refresh(); + indexRandomForConcurrentSearch("test"); // works using stored field SearchResponse searchResponse = client().prepareSearch("test") @@ -823,6 +833,7 @@ public void testPlainHighlighter() throws Exception { client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = 
searchSource().query(termQuery("field1", "test")) @@ -1025,6 +1036,7 @@ public void testFVHManyMatches() throws Exception { String value = new String(new char[1024 * 256 / pattern.length()]).replace("\0", pattern); client().prepareIndex("test").setSource("field1", value).get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1 with default phrase limit"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "t")) @@ -1116,6 +1128,7 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception ); index("test", "type1", "3", "foo", "weird", "bar", "result"); refresh(); + indexRandomForConcurrentSearch("test"); Field fooField = new Field("foo").numOfFragments(1) .order("score") @@ -1408,6 +1421,7 @@ public void testMultiMapperVectorWithStore() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse search = client().prepareSearch() @@ -1453,6 +1467,7 @@ public void testMultiMapperVectorFromSource() throws Exception { client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse search = client().prepareSearch() @@ -1498,6 +1513,7 @@ public void testMultiMapperNoVectorWithStore() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse search = client().prepareSearch() @@ -1542,6 +1558,7 @@ public void testMultiMapperNoVectorFromSource() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse search = client().prepareSearch() @@ -1571,6 +1588,7 @@ public void testFastVectorHighlighterShouldFailIfNoTermVectors() throws Exceptio .setSource("title", "This is a test for the enabling fast vector highlighter"); } indexRandom(true, indexRequestBuilders); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(matchPhraseQuery("title", "this is a test")) @@ -1608,6 +1626,7 @@ public void testDisableFastVectorHighlighter() throws Exception { .setSource("title", "This is a test for the workaround for the fast vector highlighting SOLR-3724"); } indexRandom(true, indexRequestBuilders); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(matchPhraseQuery("title", "test for the workaround")) @@ -1669,6 +1688,7 @@ public void testFSHHighlightAllMvFragments() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("tags", "tag")) @@ -1686,11 +1706,12 @@ public void testFSHHighlightAllMvFragments() throws Exception { ); } - public void testBoostingQuery() { + public void testBoostingQuery() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick 
brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -1702,11 +1723,12 @@ public void testBoostingQuery() { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); } - public void testBoostingQueryTermVector() throws IOException { + public void testBoostingQueryTermVector() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -1718,12 +1740,13 @@ public void testBoostingQueryTermVector() throws IOException { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); } - public void testCommonTermsQuery() { + public void testCommonTermsQuery() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) @@ -1733,12 +1756,13 @@ public void testCommonTermsQuery() { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); } - public void testCommonTermsTermVector() throws IOException { + public void testCommonTermsTermVector() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) .highlighter(highlight().field("field2").order("score").preTags("").postTags("")); @@ -1764,6 +1788,7 @@ public void testPlainHighlightDifferentFragmenter() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) @@ -1816,12 +1841,13 @@ public void testPlainHighlightDifferentFragmenter() throws Exception { ); } - public void testPlainHighlighterMultipleFields() { + public void testPlainHighlighterMultipleFields() throws InterruptedException { createIndex("test"); ensureGreen(); index("test", "type1", "1", "field1", "The quick brown fox", "field2", "The slow brown fox"); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("field1", "fox")) @@ -1834,7 +1860,7 @@ public void testPlainHighlighterMultipleFields() { assertHighlight(response, 0, "field2", 0, 1, equalTo("The slow brown <2>fox")); } - public void testFastVectorHighlighterMultipleFields() { + public void testFastVectorHighlighterMultipleFields() throws InterruptedException { 
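// Editor's note (a hedged sketch, not part of the original patch): the "fvh" highlighter
// exercised in this test only works on text fields indexed with term vectors that include
// positions and offsets; testFastVectorHighlighterShouldFailIfNoTermVectors earlier in this
// file covers the failure mode. A minimal sketch of the required mapping, with illustrative
// index and field names:
//
//   assertAcked(prepareCreate("demo").setMapping("body", "type=text,term_vector=with_positions_offsets"));
//   client().prepareIndex("demo").setId("1").setSource("body", "The quick brown fox").get();
//   refresh();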
assertAcked( prepareCreate("test").setMapping( "field1", @@ -1847,6 +1873,7 @@ public void testFastVectorHighlighterMultipleFields() { index("test", "type1", "1", "field1", "The quick brown fox", "field2", "The slow brown fox"); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("field1", "fox")) @@ -1864,6 +1891,7 @@ public void testMissingStoredField() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "highlight").endObject()).get(); refresh(); + indexRandomForConcurrentSearch("test"); // This query used to fail when the field to highlight was absent SearchResponse response = client().prepareSearch("test") @@ -1904,6 +1932,7 @@ public void testNumericHighlighting() throws Exception { .setSource("text", "opensearch test", "byte", 25, "short", 42, "int", 100, "long", -1, "float", 3.2f, "double", 42.42) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("text", "test")) @@ -1926,6 +1955,7 @@ public void testResetTwice() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("text", "opensearch test").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("text", "test")) @@ -1935,7 +1965,7 @@ public void testResetTwice() throws Exception { assertHitCount(response, 1L); } - public void testHighlightUsesHighlightQuery() throws IOException { + public void testHighlightUsesHighlightQuery() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( "text", @@ -1946,6 +1976,7 @@ public void testHighlightUsesHighlightQuery() throws IOException { index("test", "type1", "1", "text", "Testing the highlight query feature"); refresh(); + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { HighlightBuilder.Field field = new HighlightBuilder.Field("text"); @@ -1981,7 +2012,11 @@ private static String randomStoreField() { return ""; } - public void testHighlightNoMatchSize() throws IOException { + public void testHighlightNoMatchSize() throws IOException, InterruptedException { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10900", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setMapping( "text", @@ -1993,6 +2028,7 @@ public void testHighlightNoMatchSize() throws IOException { String text = "I am pretty long so some of me should get cut off. Second sentence"; index("test", "type1", "1", "text", text); refresh(); + indexRandomForConcurrentSearch("test"); // When you don't set noMatchSize you don't get any results if there isn't anything to highlight. 
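// Editor's note (a hedged sketch, not part of the original patch): the noMatchSize contract
// asserted below is that the highlighter returns the leading noMatchSize characters of the
// field even when the query matched nothing in it; without the setting, the field is simply
// absent from the highlight response. Using the document indexed above:
//
//   HighlightBuilder.Field sketch = new HighlightBuilder.Field("text")
//       .fragmentSize(21)
//       .numOfFragments(1)
//       .noMatchSize(21);   // yields "I am pretty long so s" even for a non-matching query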
HighlightBuilder.Field field = new HighlightBuilder.Field("text").fragmentSize(21).numOfFragments(1).highlighterType("plain"); @@ -2091,7 +2127,11 @@ public void testHighlightNoMatchSize() throws IOException { assertNotHighlighted(response, 0, "text"); } - public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { + public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException, InterruptedException { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10900", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setMapping( "text", @@ -2104,6 +2144,7 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { String text2 = "I am short"; index("test", "type1", "1", "text", new String[] { text1, text2 }); refresh(); + indexRandomForConcurrentSearch("test"); // The no match fragment should come from the first value of a multi-valued field HighlightBuilder.Field field = new HighlightBuilder.Field("text").fragmentSize(21) @@ -2186,7 +2227,11 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { assertNotHighlighted(response, 0, "text"); } - public void testHighlightNoMatchSizeNumberOfFragments() throws IOException { + public void testHighlightNoMatchSizeNumberOfFragments() throws IOException, InterruptedException { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10900", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setMapping( "text", @@ -2200,6 +2245,7 @@ public void testHighlightNoMatchSizeNumberOfFragments() throws IOException { String text3 = "This is the fifth sentence"; index("test", "type1", "1", "text", new String[] { text1, text2, text3 }); refresh(); + indexRandomForConcurrentSearch("test"); // The no match fragment should come from the first value of a multi-valued field HighlightBuilder.Field field = new HighlightBuilder.Field("text").fragmentSize(1) @@ -2243,6 +2289,7 @@ public void testPostingsHighlighter() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy quick dog") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "test")) @@ -2320,6 +2367,7 @@ public void testPostingsHighlighterMultipleFields() throws Exception { "The slow brown fox. Second sentence." 
); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("field1", "fox")) @@ -2344,6 +2392,7 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "fox")) @@ -2376,6 +2425,7 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); source = searchSource().query(termQuery("field1", "fox")) .highlighter(highlight().field(new Field("field1").numOfFragments(0).preTags("").postTags(""))); @@ -2412,7 +2462,7 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { } } - public void testMultiMatchQueryHighlight() throws IOException { + public void testMultiMatchQueryHighlight() throws IOException, InterruptedException { XContentBuilder mapping = XContentFactory.jsonBuilder() .startObject() .startObject("properties") @@ -2434,6 +2484,7 @@ public void testMultiMatchQueryHighlight() throws IOException { .setSource("field1", "The quick brown fox jumps over", "field2", "The quick brown fox jumps over") .get(); refresh(); + indexRandomForConcurrentSearch("test"); final int iters = scaledRandomIntBetween(20, 30); for (int i = 0; i < iters; i++) { String highlighterType = rarely() ? null : RandomPicks.randomFrom(random(), ALL_TYPES); @@ -2479,6 +2530,7 @@ public void testPostingsHighlighterOrderByScore() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "sentence")) @@ -2565,6 +2617,7 @@ public void testPostingsHighlighterMultiMapperWithStore() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test . Second sentence.").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse searchResponse = client().prepareSearch() @@ -2623,6 +2676,7 @@ public void testPostingsHighlighterMultiMapperFromSource() throws Exception { client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse searchResponse = client().prepareSearch() @@ -2672,13 +2726,14 @@ public void testPostingsHighlighterShouldFailIfNoOffsets() throws Exception { assertNoFailures(search); } - public void testPostingsHighlighterBoostingQuery() throws IOException { + public void testPostingsHighlighterBoostingQuery() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); client().prepareIndex("test") .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -2689,7 +2744,7 @@ public void testPostingsHighlighterBoostingQuery() throws IOException { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog! 
Second sentence.")); } - public void testPostingsHighlighterCommonTermsQuery() throws IOException { + public void testPostingsHighlighterCommonTermsQuery() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); @@ -2697,6 +2752,7 @@ public void testPostingsHighlighterCommonTermsQuery() throws IOException { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) @@ -2738,6 +2794,7 @@ public void testPostingsHighlighterPrefixQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(prefixQuery("field2", "qui")).highlighter(highlight().field("field2")); @@ -2760,6 +2817,7 @@ public void testPostingsHighlighterFuzzyQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(fuzzyQuery("field2", "quck")).highlighter(highlight().field("field2")); @@ -2783,6 +2841,7 @@ public void testPostingsHighlighterRegexpQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(regexpQuery("field2", "qu[a-l]+k")).highlighter(highlight().field("field2")); @@ -2806,6 +2865,7 @@ public void testPostingsHighlighterWildcardQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(wildcardQuery("field2", "qui*")).highlighter(highlight().field("field2")); @@ -2840,6 +2900,7 @@ public void testPostingsHighlighterTermRangeQuery() throws Exception { client().prepareIndex("test").setSource("field1", "this is a test", "field2", "aaab").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(rangeQuery("field2").gte("aaaa").lt("zzzz")) @@ -2857,6 +2918,7 @@ public void testPostingsHighlighterQueryString() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! 
Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(queryStringQuery("qui*").defaultField("field2")) @@ -2878,6 +2940,7 @@ public void testPostingsHighlighterRegexpQueryWithinConstantScoreQuery() throws client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(constantScoreQuery(regexpQuery("field1", "pho[a-z]+"))) @@ -2892,6 +2955,7 @@ public void testPostingsHighlighterMultiTermQueryMultipleLevels() throws Excepti client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -2909,6 +2973,7 @@ public void testPostingsHighlighterPrefixQueryWithinBooleanQuery() throws Except client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -2924,6 +2989,7 @@ public void testPostingsHighlighterQueryStringWithinFilteredQuery() throws Excep client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -3028,7 +3094,7 @@ public void testFastVectorHighlighterPhraseBoost() throws Exception { * because it doesn't support the concept of terms having a different weight based on position. * @param highlighterType highlighter to test */ - private void phraseBoostTestCase(String highlighterType) { + private void phraseBoostTestCase(String highlighterType) throws InterruptedException { ensureGreen(); StringBuilder text = new StringBuilder(); text.append("words words junk junk junk junk junk junk junk junk highlight junk junk junk junk together junk\n"); @@ -3041,6 +3107,7 @@ private void phraseBoostTestCase(String highlighterType) { } index("test", "type1", "1", "field1", text.toString()); refresh(); + indexRandomForConcurrentSearch("test"); // Match queries phraseBoostTestCaseForClauses( @@ -3109,7 +3176,7 @@ private
<P extends AbstractQueryBuilder<P>
> void phraseBoostTestCaseForClauses( assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher); } - public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOException { + public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOException, InterruptedException { // check that we do not get an exception for geo_point fields in case someone tries to highlight // it accidentially with a wildcard // see https://github.com/elastic/elasticsearch/issues/17537 @@ -3133,6 +3200,7 @@ public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOExcepti .setSource(jsonBuilder().startObject().field("text", "Arbitrary text field which will should not cause a failure").endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); String highlighterType = randomFrom(ALL_TYPES); QueryBuilder query = QueryBuilders.boolQuery() .should( @@ -3150,7 +3218,7 @@ public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOExcepti assertThat(search.getHits().getAt(0).getHighlightFields().get("text").fragments().length, equalTo(1)); } - public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException { + public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException, InterruptedException { // same as above but in this example the query gets rewritten during highlighting // see https://github.com/elastic/elasticsearch/issues/17537#issuecomment-244939633 XContentBuilder mappings = jsonBuilder(); @@ -3177,6 +3245,7 @@ public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); QueryBuilder query = QueryBuilders.functionScoreQuery( QueryBuilders.boolQuery() @@ -3192,7 +3261,7 @@ public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException assertThat(search.getHits().getTotalHits().value, equalTo(1L)); } - public void testKeywordFieldHighlighting() throws IOException { + public void testKeywordFieldHighlighting() throws IOException, InterruptedException { // check that keyword highlighting works XContentBuilder mappings = jsonBuilder(); mappings.startObject(); @@ -3205,6 +3274,7 @@ public void testKeywordFieldHighlighting() throws IOException { .setSource(jsonBuilder().startObject().field("keyword_field", "some text").endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setSource( new SearchSourceBuilder().query(QueryBuilders.matchQuery("keyword_field", "some text")) @@ -3238,6 +3308,7 @@ public void testCopyToFields() throws Exception { .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchQuery("foo_copy", "brown")) .highlighter(new HighlightBuilder().field(new Field("foo_copy"))) @@ -3287,7 +3358,7 @@ public void testACopyFieldWithNestedQuery() throws Exception { ) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None)) .highlighter(new HighlightBuilder().field(new Field("foo_text").highlighterType("fvh")).requireFieldMatch(false)) @@ -3305,6 +3376,7 @@ public void testFunctionScoreQueryHighlight() throws Exception { .setSource(jsonBuilder().startObject().field("text", "brown").endObject()) 
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))) @@ -3322,6 +3394,7 @@ public void testFiltersFunctionScoreQueryHighlight() throws Exception { .setSource(jsonBuilder().startObject().field("text", "brown").field("enable", "yes").endObject()) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); FunctionScoreQueryBuilder.FilterFunctionBuilder filterBuilder = new FunctionScoreQueryBuilder.FilterFunctionBuilder( QueryBuilders.termQuery("enable", "yes"), new RandomScoreFunctionBuilder() ) @@ -3420,6 +3493,7 @@ public void testWithNestedQuery() throws Exception { ) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); for (String type : new String[] { "unified", "plain" }) { SearchResponse searchResponse = client().prepareSearch() @@ -3477,6 +3551,7 @@ public void testWithNormalizer() throws Exception { .setSource("keyword", "Hello World") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); for (String highlighterType : new String[] { "unified", "plain" }) { SearchResponse searchResponse = client().prepareSearch() @@ -3499,6 +3574,7 @@ public void testDisableHighlightIdField() throws Exception { .setSource("keyword", "Hello World") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); for (String highlighterType : new String[] { "plain", "unified" }) { SearchResponse searchResponse = client().prepareSearch() diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index ad27d9834f159..0c6c81103922f 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -1666,6 +1666,11 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma } } assertThat(actualErrors, emptyIterable()); + + if (dummyDocuments) { + bogusIds.addAll(indexRandomForMultipleSlices(indicesArray)); + } + if (!bogusIds.isEmpty()) { // delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs! for (List<String> doc : bogusIds) { @@ -1683,6 +1688,52 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma } } + /* + * This method ingests bogus documents for the given indices such that multiple slices + * are formed. This is useful for testing with the concurrent search use-case as it creates + * multiple slices based on segment count. + * @param indices the indices in which bogus documents should be ingested + * */ + protected Set<List<String>> indexRandomForMultipleSlices(String...
indices) throws InterruptedException { + Set<List<String>> bogusIds = new HashSet<>(); + int refreshCount = randomIntBetween(2, 3); + for (String index : indices) { + int numDocs = getNumShards(index).totalNumShards * randomIntBetween(2, 10); + while (refreshCount-- > 0) { + final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Exception>> errors = new CopyOnWriteArrayList<>(); + List<CountDownLatch> inFlightAsyncOperations = new ArrayList<>(); + for (int i = 0; i < numDocs; i++) { + String id = "bogus_doc_" + randomRealisticUnicodeOfLength(between(1, 10)) + dummmyDocIdGenerator.incrementAndGet(); + IndexRequestBuilder indexRequestBuilder = client().prepareIndex() + .setIndex(index) + .setId(id) + .setSource("{}", MediaTypeRegistry.JSON) + .setRouting(id); + indexRequestBuilder.execute( + new PayloadLatchedActionListener<>(indexRequestBuilder, newLatch(inFlightAsyncOperations), errors) + ); + bogusIds.add(Arrays.asList(index, id)); + } + for (CountDownLatch operation : inFlightAsyncOperations) { + operation.await(); + } + final List<Exception> actualErrors = new ArrayList<>(); + for (Tuple<IndexRequestBuilder, Exception> tuple : errors) { + Throwable t = ExceptionsHelper.unwrapCause(tuple.v2()); + if (t instanceof OpenSearchRejectedExecutionException) { + logger.debug("Error indexing doc: " + t.getMessage() + ", reindexing."); + tuple.v1().execute().actionGet(); // re-index if rejected + } else { + actualErrors.add(tuple.v2()); + } + } + assertThat(actualErrors, emptyIterable()); + refresh(index); + } + } + return bogusIds; + } + private final AtomicInteger dummmyDocIdGenerator = new AtomicInteger(); /** Disables an index block for the specified index */ diff --git a/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java index 636064d8e4f9d..f8813a8c5afa9 100644 --- a/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java @@ -13,6 +13,8 @@ import org.junit.After; import org.junit.Before; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; + /** * Base class for running the tests with parameterization of the dynamic settings * For any class that wants to use parameterization, use @ParametersFactory to generate @@ -44,4 +46,10 @@ public void afterTests() { dynamicSettings.keySet().forEach(settingsToUnset::putNull); client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settingsToUnset).get(); } + + public void indexRandomForConcurrentSearch(String...
indices) throws InterruptedException { + if (dynamicSettings.get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()).equals("true")) { + indexRandomForMultipleSlices(indices); + } + } } From 45f7be1c603d13a1ce48e5da449ae4610ba04e52 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Sat, 28 Oct 2023 15:43:14 +0530 Subject: [PATCH 102/178] =?UTF-8?q?Ensure=20that=20segments=20are=20upload?= =?UTF-8?q?=20to=20remote=20store=20in=20case=20of=20local=20and=20?= =?UTF-8?q?=E2=80=A6=20(#10948)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --------- Signed-off-by: Gaurav Bafna --- .../indices/create/RemoteCloneIndexIT.java | 133 +++++ .../indices/create/RemoteShrinkIndexIT.java | 545 ++++++++++++++++++ .../indices/create/RemoteSplitIndexIT.java | 506 ++++++++++++++++ .../remotestore/RemoteRestoreSnapshotIT.java | 43 +- .../RemoteStoreBaseIntegTestCase.java | 2 +- .../opensearch/index/shard/IndexShard.java | 24 + .../shard/RemoteStoreRefreshListener.java | 22 +- .../opensearch/index/shard/StoreRecovery.java | 21 + .../index/shard/IndexShardTests.java | 1 + 9 files changed, 1278 insertions(+), 19 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java create mode 100644 server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java create mode 100644 server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java new file mode 100644 index 0000000000000..a081110e6c5a1 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.create; + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +import org.opensearch.Version; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.admin.indices.shrink.ResizeType; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.VersionUtils; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; + +public class RemoteCloneIndexIT extends RemoteStoreBaseIntegTestCase { + + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public void testCreateCloneIndex() { + Version version = VersionUtils.randomIndexCompatibleVersion(random()); + int numPrimaryShards = randomIntBetween(1, 5); + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", numPrimaryShards).put("index.version.created", version) + ).get(); + final int docs = randomIntBetween(0, 128); + for (int i = 0; i < docs; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + internalCluster().ensureAtLeastNumDataNodes(2); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get(); + + // disable rebalancing to be able to capture the right stats. balancing can move the target primary + // making it hard to pin point the source shards. + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")) + .get(); + try { + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setResizeType(ResizeType.CLONE) + .setSettings(Settings.builder().put("index.number_of_replicas", 0).putNull("index.blocks.write").build()) + .get() + ); + ensureGreen(); + + final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get(); + assertThat(targetStats.getIndex("target").getIndexShards().keySet().size(), equalTo(numPrimaryShards)); + + final int size = docs > 0 ? 
2 * docs : 1; + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + + for (int i = docs; i < 2 * docs; i++) { + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + flushAndRefresh(); + assertHitCount( + client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + 2 * docs + ); + assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null)); + } finally { + // clean up + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + ) + .get(); + } + + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java new file mode 100644 index 0000000000000..282eb9c6ad95e --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java @@ -0,0 +1,545 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.create; + +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedSetSelector; +import org.apache.lucene.search.SortedSetSortField; +import org.apache.lucene.util.Constants; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.reroute.ClusterRerouteResponse; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.support.ActiveShardCount; +import org.opensearch.client.Client; +import org.opensearch.cluster.ClusterInfoService; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.InternalClusterInfoService; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.Murmur3HashFunction; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; +import 
org.opensearch.index.engine.SegmentsStats; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.index.seqno.SeqNoStats; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.VersionUtils; + +import java.util.Arrays; +import java.util.Map; +import java.util.stream.IntStream; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +public class RemoteShrinkIndexIT extends RemoteStoreBaseIntegTestCase { + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + } + + public void testCreateShrinkIndexToN() { + + assumeFalse("https://github.com/elastic/elasticsearch/issues/34080", Constants.WINDOWS); + + int[][] possibleShardSplits = new int[][] { { 8, 4, 2 }, { 9, 3, 1 }, { 4, 2, 1 }, { 15, 5, 1 } }; + int[] shardSplits = randomFrom(possibleShardSplits); + assertEquals(shardSplits[0], (shardSplits[0] / shardSplits[1]) * shardSplits[1]); + assertEquals(shardSplits[1], (shardSplits[1] / shardSplits[2]) * shardSplits[2]); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", shardSplits[0])).get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source") + .setId(Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON) + .get(); + } + final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + String mergeNode = discoveryNodes[0].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it.
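// Editor's note (an illustrative sketch, not part of the original patch): the Java client
// calls that follow correspond to the usual REST sequence for preparing and running a
// shrink; node name and target shard count here are hypothetical:
//
//   PUT /source/_settings
//   { "index.routing.allocation.require._name": "merge-node", "index.blocks.write": true }
//
//   POST /source/_shrink/first_shrink
//   { "settings": { "index.number_of_shards": 4, "index.number_of_replicas": 0 } }
//
// All primaries must sit on one node and the index must be write-blocked before the resize.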
+ client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true)) + .get(); + ensureGreen(); + // now merge source into a 4 shard index + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "first_shrink") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", shardSplits[1]) + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + + for (int i = 0; i < 20; i++) { // now update + client().prepareIndex("first_shrink") + .setId(Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON) + .get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + + // relocate all shards to one node such that we can merge it. + client().admin() + .indices() + .prepareUpdateSettings("first_shrink") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true)) + .get(); + ensureGreen(); + // now merge source into a 2 shard index + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("first_shrink", "second_shrink") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", shardSplits[2]) + .putNull("index.blocks.write") + .putNull("index.routing.allocation.require._name") + .build() + ) + .get() + ); + ensureGreen(); + assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + // let it be allocated anywhere and bump replicas + client().admin() + .indices() + .prepareUpdateSettings("second_shrink") + .setSettings(Settings.builder().putNull("index.routing.allocation.include._id").put("index.number_of_replicas", 0)) + .get(); + ensureGreen(); + assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + + for (int i = 0; i < 20; i++) { // now update + client().prepareIndex("second_shrink") + .setId(Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON) + .get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + } + + public void testShrinkIndexPrimaryTerm() throws Exception { + int numberOfShards = randomIntBetween(2, 20); + int numberOfTargetShards = randomValueOtherThanMany(n -> numberOfShards % n != 0, () -> randomIntBetween(1, numberOfShards - 1)); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", numberOfShards)).get(); + + final Map dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + 
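// Editor's note (a condensed restatement of the invariant testShrinkIndexPrimaryTerm checks,
// using the names the test itself introduces): shrinking must bump every target shard's
// primary term past the maximum primary term observed across the source shards, i.e. after
// the resize
//
//   assertThat(afterShrinkIndexMetadata.primaryTerm(shardId), equalTo(beforeShrinkPrimaryTerm + 1));
//
// where beforeShrinkPrimaryTerm is the max of indexMetadata::primaryTerm over all source shards.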
assertThat(dataNodes.size(), greaterThanOrEqualTo(2)); + final DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + final String mergeNode = discoveryNodes[0].getName(); + // This needs more than the default timeout if a large number of shards were created. + ensureGreen(TimeValue.timeValueSeconds(120)); + + // fail random primary shards to force primary terms to increase + final Index source = resolveIndex("source"); + final int iterations = scaledRandomIntBetween(0, 16); + for (int i = 0; i < iterations; i++) { + final String node = randomSubsetOf(1, internalCluster().nodesInclude("source")).get(0); + final IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); + final IndexService indexShards = indexServices.indexServiceSafe(source); + for (final Integer shardId : indexShards.shardIds()) { + final IndexShard shard = indexShards.getShard(shardId); + if (shard.routingEntry().primary() && randomBoolean()) { + disableAllocation("source"); + shard.failShard("test", new Exception("test")); + // this can not succeed until the shard is failed and a replica is promoted + int id = 0; + while (true) { + // find an ID that routes to the right shard, we will only index to the shard that saw a primary failure + final String s = Integer.toString(id); + final int hash = Math.floorMod(Murmur3HashFunction.hash(s), numberOfShards); + if (hash == shardId) { + final IndexRequest request = new IndexRequest("source").id(s) + .source("{ \"f\": \"" + s + "\"}", MediaTypeRegistry.JSON); + client().index(request).get(); + break; + } else { + id++; + } + } + enableAllocation("source"); + ensureGreen(); + } + } + } + + // relocate all shards to one node such that we can merge it. + final Settings.Builder prepareShrinkSettings = Settings.builder() + .put("index.routing.allocation.require._name", mergeNode) + .put("index.blocks.write", true); + client().admin().indices().prepareUpdateSettings("source").setSettings(prepareShrinkSettings).get(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to relocate many shards + + final IndexMetadata indexMetadata = indexMetadata(client(), "source"); + final long beforeShrinkPrimaryTerm = IntStream.range(0, numberOfShards).mapToLong(indexMetadata::primaryTerm).max().getAsLong(); + + // now merge source into target + final Settings shrinkSettings = Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", numberOfTargetShards) + .build(); + assertAcked(client().admin().indices().prepareResizeIndex("source", "target").setSettings(shrinkSettings).get()); + + ensureGreen(TimeValue.timeValueSeconds(120)); + + final IndexMetadata afterShrinkIndexMetadata = indexMetadata(client(), "target"); + for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { + assertThat(afterShrinkIndexMetadata.primaryTerm(shardId), equalTo(beforeShrinkPrimaryTerm + 1)); + } + } + + private static IndexMetadata indexMetadata(final Client client, final String index) { + final ClusterStateResponse clusterStateResponse = client.admin().cluster().state(new ClusterStateRequest()).actionGet(); + return clusterStateResponse.getState().metadata().index(index); + } + + public void testCreateShrinkIndex() { + internalCluster().ensureAtLeastNumDataNodes(2); + Version version = VersionUtils.randomVersion(random()); + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7)).put("index.version.created", version) + 
).get(); + final int docs = randomIntBetween(0, 128); + for (int i = 0; i < docs; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings( + Settings.builder() + .put("index.routing.allocation.require._name", discoveryNodes[0].getName()) + .put("index.blocks.write", true) + ) + .get(); + ensureGreen(); + + final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get(); + + // disable rebalancing to be able to capture the right stats. balancing can move the target primary + // making it hard to pin point the source shards. + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")) + .get(); + + // now merge source into a single shard index + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .putNull("index.blocks.write") + .putNull("index.routing.allocation.require._name") + .build() + ) + .get() + ); + ensureGreen(); + + // resolve true merge node - this is not always the node we required as all shards may be on another node + final ClusterState state = client().admin().cluster().prepareState().get().getState(); + DiscoveryNode mergeNode = state.nodes().get(state.getRoutingTable().index("target").shard(0).primaryShard().currentNodeId()); + logger.info("merge node {}", mergeNode); + + final long maxSeqNo = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getSeqNoStats) + .mapToLong(SeqNoStats::getMaxSeqNo) + .max() + .getAsLong(); + final long maxUnsafeAutoIdTimestamp = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getStats) + .map(CommonStats::getSegments) + .mapToLong(SegmentsStats::getMaxUnsafeAutoIdTimestamp) + .max() + .getAsLong(); + + final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get(); + for (final ShardStats shardStats : targetStats.getShards()) { + final SeqNoStats seqNoStats = shardStats.getSeqNoStats(); + final ShardRouting shardRouting = shardStats.getShardRouting(); + assertThat("failed on " + shardRouting, seqNoStats.getMaxSeqNo(), equalTo(maxSeqNo)); + assertThat("failed on " + shardRouting, seqNoStats.getLocalCheckpoint(), equalTo(maxSeqNo)); + assertThat( + "failed on " + shardRouting, + shardStats.getStats().getSegments().getMaxUnsafeAutoIdTimestamp(), + equalTo(maxUnsafeAutoIdTimestamp) + ); + } + + final int size = docs > 0 ?
2 * docs : 1; + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + + for (int i = docs; i < 2 * docs; i++) { + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 2 * docs); + assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null)); + + // clean up + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + ) + .get(); + } + + /** + * Tests that we can manually recover from a failed allocation due to shards being moved away etc. + */ + public void testCreateShrinkIndexFails() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7)).put("number_of_replicas", 0) + ).get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + String spareNode = discoveryNodes[0].getName(); + String mergeNode = discoveryNodes[1].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true)) + .get(); + ensureGreen(); + + // now merge source into a single shard index + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setWaitForActiveShards(ActiveShardCount.NONE) + .setSettings( + Settings.builder() + .put("index.routing.allocation.exclude._name", mergeNode) // we manually exclude the merge node to force the allocation to fail + .put("index.number_of_replicas", 0) + .put("index.allocation.max_retries", 1) + .build() + ) + .get(); + client().admin().cluster().prepareHealth("target").setWaitForEvents(Priority.LANGUID).get(); + + // now we move all shards away from the merge node + client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", spareNode).put("index.blocks.write", true)) + .get(); + ensureGreen("source"); + + client().admin() + .indices() + .prepareUpdateSettings("target") // erase the forced allocation failure
+ .setSettings(Settings.builder().putNull("index.routing.allocation.exclude._name")) + .get(); + // wait until it fails + assertBusy(() -> { + ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); + RoutingTable routingTables = clusterStateResponse.getState().routingTable(); + assertTrue(routingTables.index("target").shard(0).getShards().get(0).unassigned()); + assertEquals( + UnassignedInfo.Reason.ALLOCATION_FAILED, + routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getReason() + ); + assertEquals(1, routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getNumFailedAllocations()); + }); + client().admin() + .indices() + .prepareUpdateSettings("source") // now relocate them all to the right node + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode)) + .get(); + ensureGreen("source"); + + final InternalClusterInfoService infoService = (InternalClusterInfoService) internalCluster().getInstance( + ClusterInfoService.class, + internalCluster().getClusterManagerName() + ); + infoService.refresh(); + // kick off a retry and wait until it's done! + ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + long expectedShardSize = clusterRerouteResponse.getState() + .routingTable() + .index("target") + .shard(0) + .getShards() + .get(0) + .getExpectedShardSize(); + // we support the expected shard size in the allocator to sum up over the source index shards + assertTrue("expected shard size must be set but wasn't: " + expectedShardSize, expectedShardSize > 0); + ensureGreen(); + assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + } + + public void testCreateShrinkWithIndexSort() throws Exception { + SortField expectedSortField = new SortedSetSortField("id", true, SortedSetSelector.Type.MAX); + expectedSortField.setMissingValue(SortedSetSortField.STRING_FIRST); + Sort expectedIndexSort = new Sort(expectedSortField); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings( + Settings.builder() + .put(indexSettings()) + .put("sort.field", "id") + .put("sort.order", "desc") + .put("number_of_shards", 8) + .put("number_of_replicas", 0) + ).setMapping("id", "type=keyword,doc_values=true").get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source") + .setId(Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", MediaTypeRegistry.JSON) + .get(); + } + final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + String mergeNode = discoveryNodes[0].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + + flushAndRefresh(); + assertSortedSegments("source", expectedIndexSort); + + // relocate all shards to one node such that we can merge it.
+ client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true)) + .get(); + ensureGreen(); + + // check that index sort cannot be set on the target index + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", "2") + .put("index.sort.field", "foo") + .build() + ) + .get() + ); + assertThat(exc.getMessage(), containsString("can't override index sort when resizing an index")); + + // check that the index sort order of `source` is correctly applied to the `target` + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", "2") + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + flushAndRefresh(); + GetSettingsResponse settingsResponse = client().admin().indices().prepareGetSettings("target").execute().actionGet(); + assertEquals(settingsResponse.getSetting("target", "index.sort.field"), "id"); + assertEquals(settingsResponse.getSetting("target", "index.sort.order"), "desc"); + assertSortedSegments("target", expectedIndexSort); + + // ... and that the index sort is also applied to updates + for (int i = 20; i < 40; i++) { + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + flushAndRefresh(); + assertSortedSegments("target", expectedIndexSort); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java new file mode 100644 index 0000000000000..dd4252d24f314 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java @@ -0,0 +1,506 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.action.admin.indices.create; + +import org.apache.lucene.search.join.ScoreMode; +import org.apache.lucene.util.Constants; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.admin.indices.shrink.ResizeType; +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.get.GetResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.MetadataCreateIndexService; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.Murmur3HashFunction; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; +import org.opensearch.index.engine.SegmentsStats; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.index.seqno.SeqNoStats; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.VersionUtils; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.stream.IntStream; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.query.QueryBuilders.nestedQuery; +import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; + +public class RemoteSplitIndexIT extends RemoteStoreBaseIntegTestCase { + + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + } + + public void testCreateSplitIndexToN() throws IOException { + int[][] possibleShardSplits = new int[][] { { 2, 4, 8 }, { 3, 6, 12 }, { 1, 2, 4 } }; + int[] shardSplits = randomFrom(possibleShardSplits); + splitToN(shardSplits[0], shardSplits[1], shardSplits[2]); + } 
+ + public void testSplitFromOneToN() { + + assumeFalse("https://github.com/elastic/elasticsearch/issues/34080", Constants.WINDOWS); + + splitToN(1, 5, 10); + client().admin().indices().prepareDelete("*").get(); + int randomSplit = randomIntBetween(2, 6); + splitToN(1, randomSplit, randomSplit * 2); + } + + private void splitToN(int sourceShards, int firstSplitShards, int secondSplitShards) { + + assertEquals(sourceShards, (sourceShards * firstSplitShards) / firstSplitShards); + assertEquals(firstSplitShards, (firstSplitShards * secondSplitShards) / secondSplitShards); + internalCluster().ensureAtLeastNumDataNodes(2); + final boolean useRouting = randomBoolean(); + final boolean useNested = randomBoolean(); + final boolean useMixedRouting = useRouting ? randomBoolean() : false; + CreateIndexRequestBuilder createInitialIndex = prepareCreate("source"); + Settings.Builder settings = Settings.builder().put(indexSettings()).put("number_of_shards", sourceShards); + final boolean useRoutingPartition; + if (randomBoolean()) { + // randomly set the value manually + int routingShards = secondSplitShards * randomIntBetween(1, 10); + settings.put("index.number_of_routing_shards", routingShards); + useRoutingPartition = false; + } else { + useRoutingPartition = randomBoolean(); + } + if (useRouting && useMixedRouting == false && useRoutingPartition) { + int numRoutingShards = MetadataCreateIndexService.calculateNumRoutingShards(secondSplitShards, Version.CURRENT) - 1; + settings.put("index.routing_partition_size", randomIntBetween(1, numRoutingShards)); + if (useNested) { + createInitialIndex.setMapping("_routing", "required=true", "nested1", "type=nested"); + } else { + createInitialIndex.setMapping("_routing", "required=true"); + } + } else if (useNested) { + createInitialIndex.setMapping("nested1", "type=nested"); + } + logger.info("use routing {} use mixed routing {} use nested {}", useRouting, useMixedRouting, useNested); + createInitialIndex.setSettings(settings).get(); + + int numDocs = randomIntBetween(10, 50); + String[] routingValue = new String[numDocs]; + + BiFunction<String, Integer, IndexRequestBuilder> indexFunc = (index, id) -> { + try { + return client().prepareIndex(index) + .setId(Integer.toString(id)) + .setSource( + jsonBuilder().startObject() + .field("foo", "bar") + .field("i", id) + .startArray("nested1") + .startObject() + .field("n_field1", "n_value1_1") + .field("n_field2", "n_value2_1") + .endObject() + .startObject() + .field("n_field1", "n_value1_2") + .field("n_field2", "n_value2_2") + .endObject() + .endArray() + .endObject() + ); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }; + for (int i = 0; i < numDocs; i++) { + IndexRequestBuilder builder = indexFunc.apply("source", i); + if (useRouting) { + String routing = randomRealisticUnicodeOfCodepointLengthBetween(1, 10); + if (useMixedRouting && randomBoolean()) { + routingValue[i] = null; + } else { + routingValue[i] = routing; + } + builder.setRouting(routingValue[i]); + } + builder.get(); + } + + if (randomBoolean()) { + for (int i = 0; i < numDocs; i++) { // let's introduce some updates / deletes on the index + if (randomBoolean()) { + IndexRequestBuilder builder = indexFunc.apply("source", i); + if (useRouting) { + builder.setRouting(routingValue[i]); + } + builder.get(); + } + } + } + + ensureYellow(); + client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + Settings.Builder firstSplitSettingsBuilder = Settings.builder() +
.put("index.number_of_replicas", 0) + .put("index.number_of_shards", firstSplitShards) + .putNull("index.blocks.write"); + if (sourceShards == 1 && useRoutingPartition == false && randomBoolean()) { // try to set it if we have a source index with 1 shard + firstSplitSettingsBuilder.put("index.number_of_routing_shards", secondSplitShards); + } + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "first_split") + .setResizeType(ResizeType.SPLIT) + .setSettings(firstSplitSettingsBuilder.build()) + .get() + ); + ensureGreen(); + assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + + for (int i = 0; i < numDocs; i++) { // now update + IndexRequestBuilder builder = indexFunc.apply("first_split", i); + if (useRouting) { + builder.setRouting(routingValue[i]); + } + builder.get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + for (int i = 0; i < numDocs; i++) { + GetResponse getResponse = client().prepareGet("first_split", Integer.toString(i)).setRouting(routingValue[i]).get(); + assertTrue(getResponse.isExists()); + } + + client().admin() + .indices() + .prepareUpdateSettings("first_split") + .setSettings(Settings.builder().put("index.blocks.write", true)) + .get(); + ensureGreen(); + // now split source into a new index + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("first_split", "second_split") + .setResizeType(ResizeType.SPLIT) + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", secondSplitShards) + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + // let it be allocated anywhere and bump replicas + client().admin() + .indices() + .prepareUpdateSettings("second_split") + .setSettings(Settings.builder().put("index.number_of_replicas", 0)) + .get(); + ensureGreen(); + assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + + for (int i = 0; i < numDocs; i++) { // now update + IndexRequestBuilder builder = indexFunc.apply("second_split", i); + if (useRouting) { + builder.setRouting(routingValue[i]); + } + builder.get(); + } + flushAndRefresh(); + for (int i = 0; i < numDocs; i++) { + GetResponse getResponse = client().prepareGet("second_split", Integer.toString(i)).setRouting(routingValue[i]).get(); + assertTrue(getResponse.isExists()); + } + assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + if (useNested) { + assertNested("source", numDocs); + assertNested("first_split", numDocs); + assertNested("second_split", numDocs); + } + assertAllUniqueDocs( + client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + numDocs + ); + assertAllUniqueDocs( + 
client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + numDocs + ); + assertAllUniqueDocs(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + } + + public void assertNested(String index, int numDocs) { + // now, do a nested query + SearchResponse searchResponse = client().prepareSearch(index) + .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)) + .get(); + assertNoFailures(searchResponse); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numDocs)); + } + + public void assertAllUniqueDocs(SearchResponse response, int numDocs) { + Set ids = new HashSet<>(); + for (int i = 0; i < response.getHits().getHits().length; i++) { + String id = response.getHits().getHits()[i].getId(); + assertTrue("found ID " + id + " more than once", ids.add(id)); + } + assertEquals(numDocs, ids.size()); + } + + public void testSplitIndexPrimaryTerm() throws Exception { + int numberOfTargetShards = randomIntBetween(2, 20); + int numberOfShards = randomValueOtherThanMany(n -> numberOfTargetShards % n != 0, () -> between(1, numberOfTargetShards - 1)); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings( + Settings.builder() + .put(indexSettings()) + .put("number_of_shards", numberOfShards) + .put("index.number_of_routing_shards", numberOfTargetShards) + ).get(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to allocate many shards + + // fail random primary shards to force primary terms to increase + final Index source = resolveIndex("source"); + final int iterations = scaledRandomIntBetween(0, 16); + for (int i = 0; i < iterations; i++) { + final String node = randomSubsetOf(1, internalCluster().nodesInclude("source")).get(0); + final IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); + final IndexService indexShards = indexServices.indexServiceSafe(source); + for (final Integer shardId : indexShards.shardIds()) { + final IndexShard shard = indexShards.getShard(shardId); + if (shard.routingEntry().primary() && randomBoolean()) { + disableAllocation("source"); + shard.failShard("test", new Exception("test")); + // this can not succeed until the shard is failed and a replica is promoted + int id = 0; + while (true) { + // find an ID that routes to the right shard, we will only index to the shard that saw a primary failure + final String s = Integer.toString(id); + final int hash = Math.floorMod(Murmur3HashFunction.hash(s), numberOfShards); + if (hash == shardId) { + final IndexRequest request = new IndexRequest("source").id(s) + .source("{ \"f\": \"" + s + "\"}", MediaTypeRegistry.JSON); + client().index(request).get(); + break; + } else { + id++; + } + } + enableAllocation("source"); + ensureGreen(); + } + } + } + + final Settings.Builder prepareSplitSettings = Settings.builder().put("index.blocks.write", true); + client().admin().indices().prepareUpdateSettings("source").setSettings(prepareSplitSettings).get(); + ensureYellow(); + + final IndexMetadata indexMetadata = indexMetadata(client(), "source"); + final long beforeSplitPrimaryTerm = IntStream.range(0, numberOfShards).mapToLong(indexMetadata::primaryTerm).max().getAsLong(); + + // now split source into target + final Settings splitSettings = Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", numberOfTargetShards) + 
.putNull("index.blocks.write") + .build(); + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setResizeType(ResizeType.SPLIT) + .setSettings(splitSettings) + .get() + ); + + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to relocate many shards + + final IndexMetadata aftersplitIndexMetadata = indexMetadata(client(), "target"); + for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { + assertThat(aftersplitIndexMetadata.primaryTerm(shardId), equalTo(beforeSplitPrimaryTerm + 1)); + } + } + + private static IndexMetadata indexMetadata(final Client client, final String index) { + final ClusterStateResponse clusterStateResponse = client.admin().cluster().state(new ClusterStateRequest()).actionGet(); + return clusterStateResponse.getState().metadata().index(index); + } + + public void testCreateSplitIndex() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + Version version = VersionUtils.randomIndexCompatibleVersion(random()); + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", 1).put("index.version.created", version) + ).get(); + final int docs = randomIntBetween(0, 128); + for (int i = 0; i < docs; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get(); + + // disable rebalancing to be able to capture the right stats. balancing can move the target primary + // making it hard to pin point the source shards. 
+ client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")) + .get(); + try { + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setResizeType(ResizeType.SPLIT) + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", 2) + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + + final ClusterState state = client().admin().cluster().prepareState().get().getState(); + DiscoveryNode mergeNode = state.nodes().get(state.getRoutingTable().index("target").shard(0).primaryShard().currentNodeId()); + logger.info("split node {}", mergeNode); + + final long maxSeqNo = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getSeqNoStats) + .mapToLong(SeqNoStats::getMaxSeqNo) + .max() + .getAsLong(); + final long maxUnsafeAutoIdTimestamp = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getStats) + .map(CommonStats::getSegments) + .mapToLong(SegmentsStats::getMaxUnsafeAutoIdTimestamp) + .max() + .getAsLong(); + + final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get(); + for (final ShardStats shardStats : targetStats.getShards()) { + final SeqNoStats seqNoStats = shardStats.getSeqNoStats(); + final ShardRouting shardRouting = shardStats.getShardRouting(); + assertThat("failed on " + shardRouting, seqNoStats.getMaxSeqNo(), equalTo(maxSeqNo)); + assertThat("failed on " + shardRouting, seqNoStats.getLocalCheckpoint(), equalTo(maxSeqNo)); + assertThat( + "failed on " + shardRouting, + shardStats.getStats().getSegments().getMaxUnsafeAutoIdTimestamp(), + equalTo(maxUnsafeAutoIdTimestamp) + ); + } + + final int size = docs > 0 ? 
2 * docs : 1; + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + + for (int i = docs; i < 2 * docs; i++) { + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + flushAndRefresh(); + assertHitCount( + client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + 2 * docs + ); + assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null)); + } finally { + // clean up + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + ) + .get(); + } + + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 9e0b2a66467de..ad78c503a4a19 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -330,6 +330,8 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); assertEquals(restoreSnapshotResponse2.status(), RestStatus.ACCEPTED); ensureGreen(indexName1, restoredIndexName2); + + assertRemoteSegmentsAndTranslogUploaded(restoredIndexName2); assertDocsPresentInIndex(client, indexName1, numDocsInIndex1); assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); // indexing some new docs and validating @@ -355,6 +357,29 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { assertDocsPresentInIndex(client, indexName1, numDocsInIndex1 + 4); } + void assertRemoteSegmentsAndTranslogUploaded(String idx) throws IOException { + String indexUUID = client().admin().indices().prepareGetSettings(idx).get().getSetting(idx, IndexMetadata.SETTING_INDEX_UUID); + + Path remoteTranslogMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/metadata"); + Path remoteTranslogDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/data"); + Path segmentMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/segments/metadata"); + Path segmentDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/segments/data"); + + try ( + Stream<Path> translogMetadata = Files.list(remoteTranslogMetadataPath); + Stream<Path> translogData = Files.list(remoteTranslogDataPath); + Stream<Path> segmentMetadata = Files.list(segmentMetadataPath); + Stream<Path> segmentData = Files.list(segmentDataPath); + + ) { + assertTrue(translogData.count() > 0); + assertTrue(translogMetadata.count() > 0); + assertTrue(segmentMetadata.count() > 0); + assertTrue(segmentData.count() > 0); + } + + } + public void testRemoteRestoreIndexRestoredFromSnapshot() throws IOException, ExecutionException, InterruptedException { internalCluster().startClusterManagerOnlyNode(); internalCluster().startDataOnlyNodes(2); @@ -395,23 +420,7 @@ public void testRemoteRestoreIndexRestoredFromSnapshot() throws IOException, Exe
ensureGreen(indexName1); assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); - // Make sure remote translog is empty - String indexUUID = client().admin() - .indices() - .prepareGetSettings(indexName1) - .get() - .getSetting(indexName1, IndexMetadata.SETTING_INDEX_UUID); - - Path remoteTranslogMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/metadata"); - Path remoteTranslogDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/data"); - - try ( - Stream<Path> translogMetadata = Files.list(remoteTranslogMetadataPath); - Stream<Path> translogData = Files.list(remoteTranslogDataPath) - ) { - assertTrue(translogData.count() > 0); - assertTrue(translogMetadata.count() > 0); - } + assertRemoteSegmentsAndTranslogUploaded(indexName1); // Clear the local data before stopping the node. This will make sure that remote translog is empty. IndexShard indexShard = getIndexShard(primaryNodeName(indexName1), indexName1); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index bccca283ba772..8b4981a15433a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -56,7 +56,7 @@ public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; protected static final String REPOSITORY_2_NAME = "test-remote-store-repo-2"; protected static final int SHARD_COUNT = 1; - protected static final int REPLICA_COUNT = 1; + protected static int REPLICA_COUNT = 1; protected static final String TOTAL_OPERATIONS = "total-operations"; protected static final String REFRESHED_OR_FLUSHED_OPERATIONS = "refreshed-or-flushed-operations"; protected static final String MAX_SEQ_NO_TOTAL = "max-seq-no-total"; diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 352d4efc95269..32396f1a3df2e 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -203,6 +203,7 @@ import java.nio.file.NoSuchFileException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.EnumSet; import java.util.HashSet; @@ -2006,6 +2007,29 @@ private RemoteSegmentStoreDirectory getRemoteDirectory() { return ((RemoteSegmentStoreDirectory) remoteDirectory); } + /** + Returns true iff it is able to verify that remote segment store + is in sync with local + */ + boolean isRemoteSegmentStoreInSync() { + assert indexSettings.isRemoteStoreEnabled(); + try { + RemoteSegmentStoreDirectory directory = getRemoteDirectory(); + if (directory.readLatestMetadataFile() != null) { + // verifying that all files except EXCLUDE_FILES are uploaded to the remote + Collection<String> uploadFiles = directory.getSegmentsUploadedToRemoteStore().keySet(); + SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); + Collection<String> localFiles = segmentInfos.files(true); + if (uploadFiles.containsAll(localFiles)) { + return true; + } + } + } catch (IOException e) { + logger.error("Exception while reading latest metadata", e); + } + return false; + } + public void preRecovery() { final
IndexShardState currentState = this.state; // single volatile read if (currentState == IndexShardState.CLOSED) { diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 464adc88ae16f..dd40327298874 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -20,6 +20,7 @@ import org.opensearch.action.LatchedActionListener; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.logging.Loggers; import org.opensearch.common.unit.TimeValue; @@ -179,6 +180,9 @@ private boolean shouldSync(boolean didRefresh, boolean skipPrimaryTermCheck) { return this.primaryTerm != indexShard.getOperationPrimaryTerm(); } + /* + @return false if retry is needed + */ private boolean syncSegments() { if (isReadyForUpload() == false) { // Following check is required to enable retry and make sure that we do not lose this refresh event @@ -485,7 +489,9 @@ private void initializeRemoteDirectoryOnTermUpdate() throws IOException { * @return true iff primaryMode is true and index shard is not in closed state. */ private boolean isReadyForUpload() { - boolean isReady = indexShard.getReplicationTracker().isPrimaryMode() && indexShard.state() != IndexShardState.CLOSED; + boolean isReady = (indexShard.getReplicationTracker().isPrimaryMode() && indexShard.state() != IndexShardState.CLOSED) + || isLocalOrSnapshotRecovery(); + if (isReady == false) { StringBuilder sb = new StringBuilder("Skipped syncing segments with"); if (indexShard.getReplicationTracker() != null) { @@ -497,11 +503,25 @@ private boolean isReadyForUpload() { if (indexShard.getEngineOrNull() != null) { sb.append(" engineType=").append(indexShard.getEngine().getClass().getSimpleName()); } + if (isLocalOrSnapshotRecovery() == false) { + sb.append(" recoverySourceType=").append(indexShard.recoveryState().getRecoverySource().getType()); + sb.append(" primary=").append(indexShard.shardRouting.primary()); + } logger.trace(sb.toString()); } return isReady; } + private boolean isLocalOrSnapshotRecovery() { + // In this case when the primary mode is false, we need to upload segments to Remote Store + // This is required in case of snapshots/shrink/ split/clone where we need to durable persist + // all segments to remote before completing the recovery to ensure durability. 
+ + return (indexShard.state() == IndexShardState.RECOVERING && indexShard.shardRouting.primary()) + && (indexShard.recoveryState().getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS + || indexShard.recoveryState().getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT); + } + + /** + * Creates an {@link UploadListener} containing the stats population logic which would be triggered before and after segment upload events + */ diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index c0211e1257c8e..e823401e5ef7e 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -191,6 +191,15 @@ void recoverFromLocalShards( // just trigger a merge to do housekeeping on the // copied segments - we will also see them in stats etc. indexShard.getEngine().forceMerge(false, -1, false, false, false, UUIDs.randomBase64UUID()); + if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.isRemoteSegmentStoreInSync() == false) { + throw new IndexShardRecoveryException( + indexShard.shardId(), + "failed to upload to remote", + new IOException("Failed to upload to remote segment store") + ); + } + } return true; } catch (IOException ex) { throw new IndexShardRecoveryException(indexShard.shardId(), "failed to recover from local shards", ex); @@ -418,6 +427,12 @@ void recoverFromSnapshotAndRemoteStore( } indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); + if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.isRemoteSegmentStoreInSync() == false) { + listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); + return; + } + } indexShard.postRecovery("restore done"); listener.onResponse(true); @@ -697,6 +712,12 @@ private void restore( } indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); + if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.isRemoteSegmentStoreInSync() == false) { + listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); + return; + } + } indexShard.postRecovery("restore done"); listener.onResponse(true); }, e -> listener.onFailure(new IndexShardRestoreFailedException(shardId, "restore failed", e))); diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index fa3cf7676f55c..dc2111fdcfc56 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -2850,6 +2850,7 @@ public void testSyncSegmentsFromGivenRemoteSegmentStore() throws IOException { indexDoc(source, "_doc", "1"); indexDoc(source, "_doc", "2"); source.refresh("test"); + assertTrue("At least one remote sync should have been completed", source.isRemoteSegmentStoreInSync()); assertDocs(source, "1", "2"); indexDoc(source, "_doc", "3"); source.refresh("test"); From f372cbf89377036c67143360f903322105ceafe2 Mon Sep 17 00:00:00 2001 From: Aman Khare <85096200+amkhar@users.noreply.github.com> Date: Sat, 28 Oct 2023 23:24:03 +0530 Subject: [PATCH 103/178] Add statsName field on stream while constructing PersistedStateStats (#10964) Signed-off-by: Aman Khare Co-authored-by: Aman Khare ---
.../remote/RemoteClusterStateServiceIT.java | 70 ++++++++++++++++--- .../coordination/PersistedStateStats.java | 8 ++- .../cluster/node/stats/NodeStatsTests.java | 1 + 3 files changed, 69 insertions(+), 10 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java index 59eef3c06844b..dcf695d5366ba 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java @@ -114,6 +114,31 @@ public void testRemoteStateStats() { .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) .get(); + // assert cluster state stats + assertClusterManagerClusterStateStats(nodesStatsResponse); + + NodesStatsResponse nodesStatsResponseDataNode = client().admin() + .cluster() + .prepareNodesStats(dataNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + // assert cluster state stats for data node + DiscoveryStats dataNodeDiscoveryStats = nodesStatsResponseDataNode.getNodes().get(0).getDiscoveryStats(); + assertNotNull(dataNodeDiscoveryStats.getClusterStateStats()); + assertEquals(0, dataNodeDiscoveryStats.getClusterStateStats().getUpdateSuccess()); + + // call nodes/stats with nodeId filter + NodesStatsResponse nodesStatsNodeIdFilterResponse = client().admin() + .cluster() + .prepareNodesStats(dataNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .setNodesIds(clusterManagerNode) + .get(); + + assertClusterManagerClusterStateStats(nodesStatsNodeIdFilterResponse); + } + + private void assertClusterManagerClusterStateStats(NodesStatsResponse nodesStatsResponse) { // assert cluster state stats DiscoveryStats discoveryStats = nodesStatsResponse.getNodes().get(0).getDiscoveryStats(); @@ -125,16 +150,43 @@ public void testRemoteStateStats() { assertTrue(discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getSuccessCount() > 1); assertEquals(0, discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getFailedCount()); assertTrue(discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getTotalTimeInMillis() > 0); + } - NodesStatsResponse nodesStatsResponseDataNode = client().admin() - .cluster() - .prepareNodesStats(dataNode) - .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) - .get(); - // assert cluster state stats for data node - DiscoveryStats dataNodeDiscoveryStats = nodesStatsResponseDataNode.getNodes().get(0).getDiscoveryStats(); - assertNotNull(dataNodeDiscoveryStats.getClusterStateStats()); - assertEquals(0, dataNodeDiscoveryStats.getClusterStateStats().getUpdateSuccess()); + public void testRemoteStateStatsFromAllNodes() { + int shardCount = randomIntBetween(1, 5); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 3; + prepareCluster(clusterManagerNodeCount, dataNodeCount, INDEX_NAME, replicaCount, shardCount); + String[] allNodes = internalCluster().getNodeNames(); + // call _nodes/stats/discovery from all the nodes + for (String node : allNodes) { + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(node) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + validateNodesStatsResponse(nodesStatsResponse); + } + + // call _nodes/stats/discovery from all the nodes with random nodeId 
filter + for (String node : allNodes) { + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(node) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .setNodesIds(allNodes[randomIntBetween(0, allNodes.length - 1)]) + .get(); + validateNodesStatsResponse(nodesStatsResponse); + } + } + + private void validateNodesStatsResponse(NodesStatsResponse nodesStatsResponse) { + // _nodes/stats/discovery must never fail due to any exception + assertFalse(nodesStatsResponse.toString().contains("exception")); + assertNotNull(nodesStatsResponse.getNodes()); + assertNotNull(nodesStatsResponse.getNodes().get(0)); + assertNotNull(nodesStatsResponse.getNodes().get(0).getDiscoveryStats()); } private void setReplicaCount(int replicaCount) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java index 1dc20e564ade2..4d466c4b3ad73 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java @@ -25,7 +25,7 @@ * @opensearch.internal */ public class PersistedStateStats implements Writeable, ToXContentObject { - private String statsName; + private final String statsName; private AtomicLong totalTimeInMillis = new AtomicLong(0); private AtomicLong failedCount = new AtomicLong(0); private AtomicLong successCount = new AtomicLong(0); @@ -37,6 +37,7 @@ public PersistedStateStats(String statsName) { @Override public void writeTo(StreamOutput out) throws IOException { + out.writeString(statsName); out.writeVLong(successCount.get()); out.writeVLong(failedCount.get()); out.writeVLong(totalTimeInMillis.get()); @@ -53,6 +54,7 @@ public void writeTo(StreamOutput out) throws IOException { } public PersistedStateStats(StreamInput in) throws IOException { + this.statsName = in.readString(); this.successCount = new AtomicLong(in.readVLong()); this.failedCount = new AtomicLong(in.readVLong()); this.totalTimeInMillis = new AtomicLong(in.readVLong()); @@ -113,6 +115,10 @@ protected void addToExtendedFields(String extendedField, AtomicLong extendedFiel this.extendedFields.put(extendedField, extendedFieldValue); } + public String getStatsName() { + return statsName; + } + /** * Fields for parsing and toXContent * diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index 3050d1674a95b..80f4ebf5d737a 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -368,6 +368,7 @@ public void testSerialization() throws IOException { .getPersistenceStats() .get(0); PersistedStateStats remoteStateStats = stateStats.getPersistenceStats().get(0); + assertEquals(remoteStateStats.getStatsName(), deserializedRemoteStateStats.getStatsName()); assertEquals(remoteStateStats.getFailedCount(), deserializedRemoteStateStats.getFailedCount()); assertEquals(remoteStateStats.getSuccessCount(), deserializedRemoteStateStats.getSuccessCount()); assertEquals(remoteStateStats.getTotalTimeInMillis(), deserializedRemoteStateStats.getTotalTimeInMillis()); From 73bbeb57a8a56de0d1f9cf69e711a8a0ff26afe5 Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Sun, 29 Oct 2023 10:20:44 +0530 Subject: [PATCH 
104/178] Restore ClusterState version during remote state restore (#10853) * Restore ClusterState version during remote state restore Signed-off-by: bansvaru --- CHANGELOG.md | 1 + .../remote/RemoteClusterStateServiceIT.java | 4 +- .../RemoteStoreClusterStateRestoreIT.java | 53 +++++++++++++++++-- .../remote/RemoteClusterStateService.java | 11 ++-- .../recovery/RemoteStoreRestoreService.java | 17 +++--- .../RemoteClusterStateServiceTests.java | 34 ++++++++---- 6 files changed, 93 insertions(+), 27 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 34fd573b295b3..020fb5bda8b8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) - [AdmissionControl] Added changes for AdmissionControl Interceptor and AdmissionControlService for RateLimiting ([#9286](https://github.com/opensearch-project/OpenSearch/pull/9286)) - GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) +- [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java index dcf695d5366ba..dfde1b958882c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java @@ -90,10 +90,10 @@ public void testFullClusterRestoreStaleDelete() throws Exception { assertEquals(10, repository.blobStore().blobContainer(baseMetadataPath.add("manifest")).listBlobsByPrefix("manifest").size()); - Map<String, IndexMetadata> indexMetadataMap = remoteClusterStateService.getLatestMetadata( + Map<String, IndexMetadata> indexMetadataMap = remoteClusterStateService.getLatestClusterState( cluster().getClusterName(), getClusterState().metadata().clusterUUID() - ).getIndices(); + ).getMetadata().getIndices(); assertEquals(0, indexMetadataMap.values().stream().findFirst().get().getNumberOfReplicas()); assertEquals(shardCount, indexMetadataMap.values().stream().findFirst().get().getNumberOfShards()); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index e9afd6d36bb87..c61e2ec6e4f6c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -30,6 +30,7 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.concurrent.ExecutionException; @@ -85,6 +86,7 @@ public void testFullClusterRestore() throws Exception { // Step - 1 index some data to generate files in remote directory Map<String, Long> indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); String prevClusterUUID =
clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata resetCluster(dataNodeCount, clusterManagerNodeCount); @@ -92,9 +94,17 @@ public void testFullClusterRestore() throws Exception { String newClusterUUID = clusterService().state().metadata().clusterUUID(); assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; - // Step - 3 Trigger full cluster restore and validate + // Step - 3 validate cluster state restored + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); validateMetadata(List.of(INDEX_NAME)); verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); + } /** @@ -121,6 +131,7 @@ public void testFullClusterRestoreDoesntFailWithConflictingLocalState() throws E // index some data to generate files in remote directory Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); // stop all nodes internalCluster().stopAllNodes(); @@ -156,6 +167,14 @@ public Settings onNodeStopped(String nodeName) { newClusterUUID = clusterService().state().metadata().clusterUUID(); assert !Objects.equals(newClusterUUID, ClusterState.UNKNOWN_UUID) : "cluster restart not successful. cluster uuid is still unknown"; assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); validateMetadata(List.of(INDEX_NAME)); // start data nodes to trigger index data recovery @@ -180,6 +199,7 @@ public void testFullClusterRestoreMultipleIndices() throws Exception { updateIndexBlock(true, secondIndexName); String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata resetCluster(dataNodeCount, clusterManagerNodeCount); @@ -187,7 +207,14 @@ public void testFullClusterRestoreMultipleIndices() throws Exception { String newClusterUUID = clusterService().state().metadata().clusterUUID(); assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; - // Step - 3 Trigger full cluster restore + // Step - 3 validate cluster state restored + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. 
previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); validateMetadata(List.of(INDEX_NAME, secondIndexName)); verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, false); verifyRedIndicesAndTriggerRestore(indexStats2, secondIndexName, false); @@ -239,6 +266,7 @@ public void testRemoteStateFullRestart() throws Exception { Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); // Delete index metadata file in remote try { Files.move( @@ -257,6 +285,14 @@ public void testRemoteStateFullRestart() throws Exception { ensureGreen(INDEX_NAME); String newClusterUUID = clusterService().state().metadata().clusterUUID(); assert Objects.equals(newClusterUUID, prevClusterUUID) : "Full restart not successful. cluster uuid has changed"; + + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); validateCurrentMetadata(); verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); } @@ -309,6 +345,7 @@ public void testFullClusterRestoreGlobalMetadata() throws Exception { // Step - 1 index some data to generate files in remote directory Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); // Create global metadata - register a custom repo Path repoPath = registerCustomRepository(); @@ -328,8 +365,16 @@ public void testFullClusterRestoreGlobalMetadata() throws Exception { String newClusterUUID = clusterService().state().metadata().clusterUUID(); assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; - // Step - 3 Trigger full cluster restore and validate - // validateCurrentMetadata(); + // Step - 3 validate cluster state restored + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. 
previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + + validateCurrentMetadata(); assertEquals(Integer.valueOf(34), SETTING_CLUSTER_MAX_SHARDS_PER_NODE.get(clusterService().state().metadata().settings())); assertEquals(true, SETTING_READ_ONLY_SETTING.get(clusterService().state().metadata().settings())); assertTrue(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK)); diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index b3309b1fd8a63..205ae12cf6214 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -767,16 +767,16 @@ private IndexMetadata getIndexMetadata(String clusterName, String clusterUUID, U } /** - * Fetch latest metadata from remote cluster state including global metadata and index metadata + * Fetch latest ClusterState from remote, including global metadata, index metadata and cluster state version * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster * @return {@link IndexMetadata} */ - public Metadata getLatestMetadata(String clusterName, String clusterUUID) { + public ClusterState getLatestClusterState(String clusterName, String clusterUUID) { start(); Optional clusterMetadataManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); - if (!clusterMetadataManifest.isPresent()) { + if (clusterMetadataManifest.isEmpty()) { throw new IllegalStateException( String.format(Locale.ROOT, "Latest cluster metadata manifest is not present for the provided clusterUUID: %s", clusterUUID) ); @@ -790,7 +790,10 @@ public Metadata getLatestMetadata(String clusterName, String clusterUUID) { Map indexMetadataMap = new HashMap<>(); indices.values().forEach(indexMetadata -> { indexMetadataMap.put(indexMetadata.getIndex().getName(), indexMetadata); }); - return Metadata.builder(globalMetadata).indices(indexMetadataMap).build(); + return ClusterState.builder(ClusterState.EMPTY_STATE) + .version(clusterMetadataManifest.get().getStateVersion()) + .metadata(Metadata.builder(globalMetadata).indices(indexMetadataMap).build()) + .build(); } private Metadata getGlobalMetadata(String clusterName, String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) { diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java index aebd7d2ea201a..23bb4cea17a20 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -138,7 +138,7 @@ public RemoteRestoreResult restore( String[] indexNames ) { Map> indexMetadataMap = new HashMap<>(); - Metadata remoteMetadata = null; + ClusterState remoteState = null; boolean metadataFromRemoteStore = (restoreClusterUUID == null || restoreClusterUUID.isEmpty() || restoreClusterUUID.isBlank()) == false; @@ -150,8 +150,8 @@ public RemoteRestoreResult restore( throw new IllegalArgumentException("clusterUUID to restore from should be different from current cluster UUID"); } logger.info("Restoring cluster state from remote store from cluster UUID : [{}]", restoreClusterUUID); - remoteMetadata = 
remoteClusterStateService.getLatestMetadata(currentState.getClusterName().value(), restoreClusterUUID); - remoteMetadata.getIndices().values().forEach(indexMetadata -> { + remoteState = remoteClusterStateService.getLatestClusterState(currentState.getClusterName().value(), restoreClusterUUID); + remoteState.getMetadata().getIndices().values().forEach(indexMetadata -> { indexMetadataMap.put(indexMetadata.getIndex().getName(), new Tuple<>(true, indexMetadata)); }); } catch (Exception e) { @@ -177,7 +177,7 @@ public RemoteRestoreResult restore( } } } - return executeRestore(currentState, indexMetadataMap, restoreAllShards, remoteMetadata); + return executeRestore(currentState, indexMetadataMap, restoreAllShards, remoteState); } /** @@ -191,7 +191,7 @@ private RemoteRestoreResult executeRestore( ClusterState currentState, Map> indexMetadataMap, boolean restoreAllShards, - Metadata remoteMetadata + ClusterState remoteState ) { final String restoreUUID = UUIDs.randomBase64UUID(); List indicesToBeRestored = new ArrayList<>(); @@ -241,8 +241,11 @@ private RemoteRestoreResult executeRestore( totalShards += updatedIndexMetadata.getNumberOfShards(); } - if (remoteMetadata != null) { - restoreGlobalMetadata(mdBuilder, remoteMetadata); + if (remoteState != null) { + restoreGlobalMetadata(mdBuilder, remoteState.getMetadata()); + // Restore ClusterState version + logger.info("Restoring ClusterState with Remote State version [{}]", remoteState.version()); + builder.version(remoteState.version()); } RestoreInfo restoreInfo = new RestoreInfo("remote_store", indicesToBeRestored, totalShards, totalShards); diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 586618bd1ecff..4efd1b8a62970 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -665,7 +665,8 @@ public void testReadLatestMetadataManifestSuccessButNoIndexMetadata() throws IOE remoteClusterStateService.start(); assertEquals( - remoteClusterStateService.getLatestMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) + remoteClusterStateService.getLatestClusterState(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) + .getMetadata() .getIndices() .size(), 0 @@ -694,8 +695,10 @@ public void testReadLatestMetadataManifestSuccessButIndexMetadataFetchIOExceptio remoteClusterStateService.start(); Exception e = assertThrows( IllegalStateException.class, - () -> remoteClusterStateService.getLatestMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) - .getIndices() + () -> remoteClusterStateService.getLatestClusterState( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ).getMetadata().getIndices() ); assertEquals(e.getMessage(), "Error while downloading IndexMetadata - " + uploadedIndexMetadata.getUploadedFilename()); } @@ -740,10 +743,11 @@ public void testReadGlobalMetadata() throws IOException { final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); remoteClusterStateService.start(); + long prevClusterStateVersion = 13L; final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() .indices(List.of()) .clusterTerm(1L) - .stateVersion(1L) + 
.stateVersion(prevClusterStateVersion) .stateUUID("state-uuid") .clusterUUID("cluster-uuid") .codecVersion(MANIFEST_CURRENT_CODEC_VERSION) @@ -756,12 +760,20 @@ public void testReadGlobalMetadata() throws IOException { Metadata expactedMetadata = Metadata.builder().persistentSettings(Settings.builder().put("readonly", true).build()).build(); mockBlobContainerForGlobalMetadata(mockBlobStoreObjects(), expectedManifest, expactedMetadata); - Metadata metadata = remoteClusterStateService.getLatestMetadata( + ClusterState newClusterState = remoteClusterStateService.getLatestClusterState( clusterState.getClusterName().value(), clusterState.metadata().clusterUUID() ); - assertTrue(Metadata.isGlobalStateEquals(metadata, expactedMetadata)); + assertTrue(Metadata.isGlobalStateEquals(newClusterState.getMetadata(), expactedMetadata)); + + long newClusterStateVersion = newClusterState.getVersion(); + assert prevClusterStateVersion == newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. previousClusterVersion: [%s] is not equal to current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); } public void testReadGlobalMetadataIOException() throws IOException { @@ -793,7 +805,10 @@ public void testReadGlobalMetadataIOException() throws IOException { remoteClusterStateService.start(); Exception e = assertThrows( IllegalStateException.class, - () -> remoteClusterStateService.getLatestMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) + () -> remoteClusterStateService.getLatestClusterState( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ) ); assertEquals(e.getMessage(), "Error while downloading Global Metadata - " + globalIndexMetadataName); } @@ -824,16 +839,15 @@ public void testReadLatestIndexMetadataSuccess() throws IOException { .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) .previousClusterUUID("prev-cluster-uuid") - .globalMetadataFileName("global-metadata-file") .codecVersion(ClusterMetadataManifest.CODEC_V0) .build(); mockBlobContainer(mockBlobStoreObjects(), expectedManifest, Map.of(index.getUUID(), indexMetadata)); - Map indexMetadataMap = remoteClusterStateService.getLatestMetadata( + Map indexMetadataMap = remoteClusterStateService.getLatestClusterState( clusterState.getClusterName().value(), clusterState.metadata().clusterUUID() - ).getIndices(); + ).getMetadata().getIndices(); assertEquals(indexMetadataMap.size(), 1); assertEquals(indexMetadataMap.get(index.getName()).getIndex().getName(), index.getName()); From 84be8c9207cf1153b2eb8dfaf77cf737959781cc Mon Sep 17 00:00:00 2001 From: Dhwanil Patel Date: Mon, 30 Oct 2023 12:55:01 +0530 Subject: [PATCH 105/178] Use async write for manifest file and use latch for timeout (#10968) * Use async write for manifest file and use latch for timeout Signed-off-by: Dhwanil Patel --- .../common/settings/ClusterSettings.java | 1 + .../remote/RemoteClusterStateService.java | 91 +++++++++++++------ .../RemoteClusterStateServiceTests.java | 84 ++++++++++++++--- 3 files changed, 137 insertions(+), 39 deletions(-) diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index c2c6effc3336f..3a1fff21db366 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -684,6 +684,7 @@ public void 
apply(Settings value, Settings current, Settings previous) { RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING, RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, RemoteClusterStateService.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, + RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 205ae12cf6214..c892b475d71da 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -87,6 +87,8 @@ public class RemoteClusterStateService implements Closeable { public static final TimeValue GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + public static final TimeValue METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + public static final Setting INDEX_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( "cluster.remote_store.state.index_metadata.upload_timeout", INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT, @@ -101,6 +103,13 @@ public class RemoteClusterStateService implements Closeable { Setting.Property.NodeScope ); + public static final Setting METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.metadata_manifest.upload_timeout", + METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final ChecksumBlobStoreFormat INDEX_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( "index-metadata", METADATA_NAME_FORMAT, @@ -157,6 +166,7 @@ public class RemoteClusterStateService implements Closeable { private volatile TimeValue indexMetadataUploadTimeout; private volatile TimeValue globalMetadataUploadTimeout; + private volatile TimeValue metadataManifestUploadTimeout; private final AtomicBoolean deleteStaleMetadataRunning = new AtomicBoolean(false); private final RemotePersistenceStats remoteStateStats; @@ -190,9 +200,11 @@ public RemoteClusterStateService( this.slowWriteLoggingThreshold = clusterSettings.get(SLOW_WRITE_LOGGING_THRESHOLD); this.indexMetadataUploadTimeout = clusterSettings.get(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING); this.globalMetadataUploadTimeout = clusterSettings.get(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING); + this.metadataManifestUploadTimeout = clusterSettings.get(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING); clusterSettings.addSettingsUpdateConsumer(SLOW_WRITE_LOGGING_THRESHOLD, this::setSlowWriteLoggingThreshold); clusterSettings.addSettingsUpdateConsumer(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, this::setIndexMetadataUploadTimeout); clusterSettings.addSettingsUpdateConsumer(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, this::setGlobalMetadataUploadTimeout); + clusterSettings.addSettingsUpdateConsumer(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, this::setMetadataManifestUploadTimeout); this.remoteStateStats = new RemotePersistenceStats(); } @@ -401,13 +413,13 @@ private String writeGlobalMetadata(ClusterState clusterState) throws IOException try { if (latch.await(getGlobalMetadataUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { // TODO: We should add metrics where transfer is timing out. 
[Issue: #10687] - GlobalMetadataTransferException ex = new GlobalMetadataTransferException( + RemoteStateTransferException ex = new RemoteStateTransferException( String.format(Locale.ROOT, "Timed out waiting for transfer of global metadata to complete") ); throw ex; } } catch (InterruptedException ex) { - GlobalMetadataTransferException exception = new GlobalMetadataTransferException( + RemoteStateTransferException exception = new RemoteStateTransferException( String.format(Locale.ROOT, "Timed out waiting for transfer of global metadata to complete - %s"), ex ); @@ -415,7 +427,7 @@ private String writeGlobalMetadata(ClusterState clusterState) throws IOException throw exception; } if (exceptionReference.get() != null) { - throw new GlobalMetadataTransferException(exceptionReference.get().getMessage(), exceptionReference.get()); + throw new RemoteStateTransferException(exceptionReference.get().getMessage(), exceptionReference.get()); } return result.get(); } @@ -440,7 +452,7 @@ private List writeIndexMetadataParallel(ClusterState clus ); result.add(uploadedIndexMetadata); }, ex -> { - assert ex instanceof IndexMetadataTransferException; + assert ex instanceof RemoteStateTransferException; logger.error( () -> new ParameterizedMessage("Exception during transfer of IndexMetadata to Remote {}", ex.getMessage()), ex @@ -457,7 +469,7 @@ private List writeIndexMetadataParallel(ClusterState clus try { if (latch.await(getIndexMetadataUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { - IndexMetadataTransferException ex = new IndexMetadataTransferException( + RemoteStateTransferException ex = new RemoteStateTransferException( String.format( Locale.ROOT, "Timed out waiting for transfer of index metadata to complete - %s", @@ -469,7 +481,7 @@ private List writeIndexMetadataParallel(ClusterState clus } } catch (InterruptedException ex) { exceptionList.forEach(ex::addSuppressed); - IndexMetadataTransferException exception = new IndexMetadataTransferException( + RemoteStateTransferException exception = new RemoteStateTransferException( String.format( Locale.ROOT, "Timed out waiting for transfer of index metadata to complete - %s", @@ -481,7 +493,7 @@ private List writeIndexMetadataParallel(ClusterState clus throw exception; } if (exceptionList.size() > 0) { - IndexMetadataTransferException exception = new IndexMetadataTransferException( + RemoteStateTransferException exception = new RemoteStateTransferException( String.format( Locale.ROOT, "Exception during transfer of IndexMetadata to Remote %s", @@ -520,7 +532,7 @@ private void writeIndexMetadataAsync( indexMetadataContainer.path().buildAsString() + indexMetadataFilename ) ), - ex -> latchedActionListener.onFailure(new IndexMetadataTransferException(indexMetadata.getIndex().toString(), ex)) + ex -> latchedActionListener.onFailure(new RemoteStateTransferException(indexMetadata.getIndex().toString(), ex)) ); INDEX_METADATA_FORMAT.writeAsyncWithUrgentPriority( @@ -601,14 +613,45 @@ private ClusterMetadataManifest uploadManifest( private void writeMetadataManifest(String clusterName, String clusterUUID, ClusterMetadataManifest uploadManifest, String fileName) throws IOException { + AtomicReference result = new AtomicReference(); + AtomicReference exceptionReference = new AtomicReference(); + final BlobContainer metadataManifestContainer = manifestContainer(clusterName, clusterUUID); - CLUSTER_METADATA_MANIFEST_FORMAT.write( + + // latch to wait until upload is not finished + CountDownLatch latch = new CountDownLatch(1); + + LatchedActionListener 
completionListener = new LatchedActionListener<>(ActionListener.wrap(resp -> { + logger.trace(String.format(Locale.ROOT, "Manifest file uploaded successfully.")); + }, ex -> { exceptionReference.set(ex); }), latch); + + CLUSTER_METADATA_MANIFEST_FORMAT.writeAsyncWithUrgentPriority( uploadManifest, metadataManifestContainer, fileName, blobStoreRepository.getCompressor(), + completionListener, FORMAT_PARAMS ); + + try { + if (latch.await(getMetadataManifestUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { + RemoteStateTransferException ex = new RemoteStateTransferException( + String.format(Locale.ROOT, "Timed out waiting for transfer of manifest file to complete") + ); + throw ex; + } + } catch (InterruptedException ex) { + RemoteStateTransferException exception = new RemoteStateTransferException( + String.format(Locale.ROOT, "Timed out waiting for transfer of manifest file to complete - %s"), + ex + ); + Thread.currentThread().interrupt(); + throw exception; + } + if (exceptionReference.get() != null) { + throw new RemoteStateTransferException(exceptionReference.get().getMessage(), exceptionReference.get()); + } logger.debug( "Metadata manifest file [{}] written during [{}] phase. ", fileName, @@ -668,6 +711,10 @@ private void setGlobalMetadataUploadTimeout(TimeValue newGlobalMetadataUploadTim this.globalMetadataUploadTimeout = newGlobalMetadataUploadTimeout; } + private void setMetadataManifestUploadTimeout(TimeValue newMetadataManifestUploadTimeout) { + this.metadataManifestUploadTimeout = newMetadataManifestUploadTimeout; + } + public TimeValue getIndexMetadataUploadTimeout() { return this.indexMetadataUploadTimeout; } @@ -676,6 +723,10 @@ public TimeValue getGlobalMetadataUploadTimeout() { return this.globalMetadataUploadTimeout; } + public TimeValue getMetadataManifestUploadTimeout() { + return this.metadataManifestUploadTimeout; + } + static String getManifestFileName(long term, long version, boolean committed) { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest______C/P____ return String.join( @@ -1088,29 +1139,15 @@ public void writeMetadataFailed() { } /** - * Exception for IndexMetadata transfer failures to remote - */ - static class IndexMetadataTransferException extends RuntimeException { - - public IndexMetadataTransferException(String errorDesc) { - super(errorDesc); - } - - public IndexMetadataTransferException(String errorDesc, Throwable cause) { - super(errorDesc, cause); - } - } - - /** - * Exception for GlobalMetadata transfer failures to remote + * Exception for Remote state transfer. 
*/ - static class GlobalMetadataTransferException extends RuntimeException { + static class RemoteStateTransferException extends RuntimeException { - public GlobalMetadataTransferException(String errorDesc) { + public RemoteStateTransferException(String errorDesc) { super(errorDesc); } - public GlobalMetadataTransferException(String errorDesc, Throwable cause) { + public RemoteStateTransferException(String errorDesc, Throwable cause) { super(errorDesc, cause); } } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 4efd1b8a62970..65477051cdb30 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -68,6 +68,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Stream; @@ -230,10 +231,17 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); ArgumentCaptor writeContextArgumentCaptor = ArgumentCaptor.forClass(WriteContext.class); - + AtomicReference capturedWriteContext = new AtomicReference<>(); doAnswer((i) -> { actionListenerArgumentCaptor.getValue().onResponse(null); return null; + }).doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onResponse(null); + capturedWriteContext.set(writeContextArgumentCaptor.getValue()); + return null; + }).doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; }).when(container).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); remoteClusterStateService.start(); @@ -262,27 +270,30 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); assertThat(manifest.getPreviousClusterUUID(), is(expectedManifest.getPreviousClusterUUID())); - assertEquals(actionListenerArgumentCaptor.getAllValues().size(), 2); - assertEquals(writeContextArgumentCaptor.getAllValues().size(), 2); + assertEquals(actionListenerArgumentCaptor.getAllValues().size(), 3); + assertEquals(writeContextArgumentCaptor.getAllValues().size(), 3); - WriteContext capturedWriteContext = writeContextArgumentCaptor.getValue(); - byte[] writtenBytes = capturedWriteContext.getStreamProvider(Integer.MAX_VALUE).provideStream(0).getInputStream().readAllBytes(); + byte[] writtenBytes = capturedWriteContext.get() + .getStreamProvider(Integer.MAX_VALUE) + .provideStream(0) + .getInputStream() + .readAllBytes(); IndexMetadata writtenIndexMetadata = RemoteClusterStateService.INDEX_METADATA_FORMAT.deserialize( - capturedWriteContext.getFileName(), + capturedWriteContext.get().getFileName(), blobStoreRepository.getNamedXContentRegistry(), new BytesArray(writtenBytes) ); - assertEquals(capturedWriteContext.getWritePriority(), WritePriority.URGENT); + assertEquals(capturedWriteContext.get().getWritePriority(), WritePriority.URGENT); assertEquals(writtenIndexMetadata.getNumberOfShards(), 1); assertEquals(writtenIndexMetadata.getNumberOfReplicas(), 0); assertEquals(writtenIndexMetadata.getIndex().getName(), 
"test-index"); assertEquals(writtenIndexMetadata.getIndex().getUUID(), "index-uuid"); long expectedChecksum = RemoteTransferContainer.checksumOfChecksum(new ByteArrayIndexInput("metadata-filename", writtenBytes), 8); - if (capturedWriteContext.doRemoteDataIntegrityCheck()) { - assertEquals(capturedWriteContext.getExpectedChecksum().longValue(), expectedChecksum); + if (capturedWriteContext.get().doRemoteDataIntegrityCheck()) { + assertEquals(capturedWriteContext.get().getExpectedChecksum().longValue(), expectedChecksum); } else { - assertEquals(capturedWriteContext.getExpectedChecksum(), null); + assertEquals(capturedWriteContext.get().getExpectedChecksum(), null); } } @@ -306,11 +317,44 @@ public void run() { remoteClusterStateService.start(); assertThrows( - RemoteClusterStateService.GlobalMetadataTransferException.class, + RemoteClusterStateService.RemoteStateTransferException.class, () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) ); } + public void testTimeoutWhileWritingManifestFile() throws IOException { + // verify update metadata manifest upload timeout + int metadataManifestUploadTimeout = 2; + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.metadata_manifest.upload_timeout", metadataManifestUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); + + ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + + doAnswer((i) -> { // For Global Metadata + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; + }).doAnswer((i) -> { // For Index Metadata + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; + }).doAnswer((i) -> { + // For Manifest file perform No Op, so latch in code will timeout + return null; + }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); + + remoteClusterStateService.start(); + try { + remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)); + } catch (Exception e) { + assertTrue(e instanceof RemoteClusterStateService.RemoteStateTransferException); + assertTrue(e.getMessage().contains("Timed out waiting for transfer of manifest file to complete")); + } + } + public void testWriteFullMetadataInParallelFailureForIndexMetadata() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); @@ -327,7 +371,7 @@ public void testWriteFullMetadataInParallelFailureForIndexMetadata() throws IOEx remoteClusterStateService.start(); assertThrows( - RemoteClusterStateService.IndexMetadataTransferException.class, + RemoteClusterStateService.RemoteStateTransferException.class, () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) ); assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); @@ -1142,6 +1186,22 @@ public void testIndexMetadataUploadWaitTimeSetting() { assertEquals(indexMetadataUploadTimeout, remoteClusterStateService.getIndexMetadataUploadTimeout().seconds()); } + public void 
testMetadataManifestUploadWaitTimeSetting() { + // verify default value + assertEquals( + RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT, + remoteClusterStateService.getMetadataManifestUploadTimeout() + ); + + // verify update metadata manifest upload timeout + int metadataManifestUploadTimeout = randomIntBetween(1, 10); + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.metadata_manifest.upload_timeout", metadataManifestUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + assertEquals(metadataManifestUploadTimeout, remoteClusterStateService.getMetadataManifestUploadTimeout().seconds()); + } + public void testGlobalMetadataUploadWaitTimeSetting() { // verify default value assertEquals( From 4efa6d7a8dfdaba93571785315eeeb24956917f5 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Mon, 30 Oct 2023 14:58:59 +0530 Subject: [PATCH 106/178] Read the same medata file that is locked during restore of shallow snapshot (#10979) Signed-off-by: Sachin Kale --- .../remotestore/RemoteRestoreSnapshotIT.java | 67 +++++++++++++++++++ .../opensearch/index/shard/IndexShard.java | 3 +- .../opensearch/index/shard/StoreRecovery.java | 5 ++ .../store/RemoteSegmentStoreDirectory.java | 6 +- .../RemoteStoreMetadataLockManager.java | 16 +++++ 5 files changed, 93 insertions(+), 4 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index ad78c503a4a19..21ce4be9981fb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -589,4 +589,71 @@ public void testRestoreShallowSnapshotRepository() throws ExecutionException, In assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); } + public void testRestoreShallowSnapshotIndexAfterSnapshot() throws ExecutionException, InterruptedException { + String indexName1 = "testindex1"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String remoteStoreRepoNameUpdated = "test-rs-repo-updated" + TEST_REMOTE_STORE_REPO_SUFFIX; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + Path absolutePath2 = randomRepoPath().toAbsolutePath(); + String[] pathTokens = absolutePath1.toString().split("/"); + String basePath = pathTokens[pathTokens.length - 1]; + Arrays.copyOf(pathTokens, pathTokens.length - 1); + Path location = PathUtils.get(String.join("/", pathTokens)); + pathTokens = absolutePath2.toString().split("/"); + String basePath2 = pathTokens[pathTokens.length - 1]; + Arrays.copyOf(pathTokens, pathTokens.length - 1); + Path location2 = PathUtils.get(String.join("/", pathTokens)); + logger.info("Path 1 [{}]", absolutePath1); + logger.info("Path 2 [{}]", absolutePath2); + String restoredIndexName1 = indexName1 + "-restored"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(location, basePath, true)); + + Client client = client(); + Settings indexSettings = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + createIndex(indexName1, indexSettings); + + int numDocsInIndex1 = randomIntBetween(2, 5); + indexDocuments(client, 
indexName1, numDocsInIndex1); + + ensureGreen(indexName1); + + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo1 = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(List.of(indexName1))); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); + + int extraNumDocsInIndex1 = randomIntBetween(20, 50); + indexDocuments(client, indexName1, extraNumDocsInIndex1); + refresh(indexName1); + + client().admin().indices().close(Requests.closeIndexRequest(indexName1)).get(); + createRepository(remoteStoreRepoNameUpdated, "fs", remoteRepoPath); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .setSourceRemoteStoreRepository(remoteStoreRepoNameUpdated) + .get(); + + assertTrue(restoreSnapshotResponse2.getRestoreInfo().failedShards() == 0); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); + + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); + } + } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 32396f1a3df2e..cf42c6749fc79 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -4908,8 +4908,7 @@ public void syncSegmentsFromGivenRemoteSegmentStore( remoteStore.incRef(); } Map uploadedSegments = sourceRemoteDirectory - .initializeToSpecificCommit(primaryTerm, commitGeneration) - .getMetadata(); + .getSegmentsUploadedToRemoteStore(); final Directory storeDirectory = store.directory(); store.incRef(); diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index e823401e5ef7e..5b1940bb1d9a5 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -410,6 +410,11 @@ void recoverFromSnapshotAndRemoteStore( indexUUID, shardId ); + sourceRemoteDirectory.initializeToSpecificCommit( + primaryTerm, + commitGeneration, + recoverySource.snapshot().getSnapshotId().getUUID() + ); indexShard.syncSegmentsFromGivenRemoteSegmentStore(true, sourceRemoteDirectory, primaryTerm, commitGeneration); final Store store = indexShard.store(); if (indexShard.indexSettings.isRemoteTranslogStoreEnabled() == false) { diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index be1f2341236ab..988d52202f975 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -34,6 +34,7 @@ import org.opensearch.index.store.lockmanager.FileLockInfo; import org.opensearch.index.store.lockmanager.RemoteStoreCommitLevelLockManager; import 
org.opensearch.index.store.lockmanager.RemoteStoreLockManager; +import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; @@ -160,8 +161,9 @@ public RemoteSegmentMetadata init() throws IOException { * * @throws IOException if there were any failures in reading the metadata file */ - public RemoteSegmentMetadata initializeToSpecificCommit(long primaryTerm, long commitGeneration) throws IOException { - String metadataFile = getMetadataFileForCommit(primaryTerm, commitGeneration); + public RemoteSegmentMetadata initializeToSpecificCommit(long primaryTerm, long commitGeneration, String acquirerId) throws IOException { + String metadataFilePrefix = MetadataFilenameUtils.getMetadataFilePrefixForCommit(primaryTerm, commitGeneration); + String metadataFile = ((RemoteStoreMetadataLockManager) mdLockManager).fetchLock(metadataFilePrefix, acquirerId); RemoteSegmentMetadata remoteSegmentMetadata = readMetadataFile(metadataFile); if (remoteSegmentMetadata != null) { this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(remoteSegmentMetadata.getMetadata()); diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java index fd7906729e314..756905d02229a 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java @@ -14,10 +14,13 @@ import org.apache.lucene.store.IndexOutput; import org.opensearch.index.store.RemoteBufferedOutputDirectory; +import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.NoSuchFileException; import java.util.Collection; +import java.util.List; import java.util.Objects; +import java.util.stream.Collectors; /** * A Class that implements Remote Store Lock Manager by creating lock files for the remote store files that needs to @@ -70,6 +73,19 @@ public void release(LockInfo lockInfo) throws IOException { } } + public String fetchLock(String filenamePrefix, String acquirerId) throws IOException { + Collection lockFiles = lockDirectory.listFilesByPrefix(filenamePrefix); + List lockFilesForAcquirer = lockFiles.stream() + .filter(lockFile -> acquirerId.equals(FileLockInfo.LockFileUtils.getAcquirerIdFromLock(lockFile))) + .map(FileLockInfo.LockFileUtils::getFileToLockNameFromLock) + .collect(Collectors.toList()); + if (lockFilesForAcquirer.size() == 0) { + throw new FileNotFoundException("No lock file found for prefix: " + filenamePrefix + " and acquirerId: " + acquirerId); + } + assert lockFilesForAcquirer.size() == 1; + return lockFilesForAcquirer.get(0); + } + /** * Checks whether a given file have any lock on it or not. * @param lockInfo File Lock Info instance for which we need to check if lock is acquired. 
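
The fetchLock addition above is what lets restore read the exact metadata file a shallow snapshot pinned: lock files are listed by the metadata file's prefix, filtered down to the ones whose encoded acquirer id matches the snapshot UUID, and the name of the locked file is returned. A minimal, self-contained sketch of that lookup follows (FetchLockSketch and the "::" separator are illustrative stand-ins, not part of the patch; the real lock-name encoding lives in FileLockInfo.LockFileUtils):

import java.io.FileNotFoundException;
import java.util.List;
import java.util.stream.Collectors;

public class FetchLockSketch {
    // Stand-in separator between the locked file name and the acquirer id;
    // the real encoding is handled by FileLockInfo.LockFileUtils.
    private static final String SEPARATOR = "::";

    // Mirrors the shape of RemoteStoreMetadataLockManager#fetchLock: from all lock
    // files sharing a metadata-file prefix, keep those taken by this acquirer
    // (the snapshot UUID) and return the name of the file they lock.
    static String fetchLock(List<String> lockFiles, String prefix, String acquirerId) throws FileNotFoundException {
        List<String> locked = lockFiles.stream()
            .filter(name -> name.startsWith(prefix))
            .filter(name -> acquirerId.equals(name.substring(name.lastIndexOf(SEPARATOR) + SEPARATOR.length())))
            .map(name -> name.substring(0, name.lastIndexOf(SEPARATOR)))
            .collect(Collectors.toList());
        if (locked.isEmpty()) {
            throw new FileNotFoundException("No lock file found for prefix: " + prefix + " and acquirerId: " + acquirerId);
        }
        // As in the patch, one acquirer is expected to hold at most one lock per commit.
        assert locked.size() == 1;
        return locked.get(0);
    }

    public static void main(String[] args) throws Exception {
        List<String> lockFiles = List.of(
            "metadata__9__2__node1" + SEPARATOR + "snap-uuid-1",
            "metadata__9__2__node1" + SEPARATOR + "snap-uuid-2"
        );
        // Restore for snapshot "snap-uuid-1" resolves to the exact file it locked.
        System.out.println(fetchLock(lockFiles, "metadata__9__2", "snap-uuid-1"));
    }
}

StoreRecovery then passes the snapshot UUID into initializeToSpecificCommit, so restore reads this acquirer-scoped file instead of whatever getMetadataFileForCommit would resolve as the latest metadata for that primary term and generation.
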
From 0d7d1e9db7ace8f6e90ab98ade88d719efaf37e1 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 30 Oct 2023 09:15:17 -0400 Subject: [PATCH 107/178] Update bundled JDK to JDK-21.0.1 (#10576) Signed-off-by: Andriy Redko --- .../java/org/opensearch/gradle/test/DistroTestPlugin.java | 4 ++-- buildSrc/version.properties | 4 +--- distribution/src/config/jvm.options | 2 +- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index a420c8b63b02c..1ad7e056b6ae6 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -77,9 +77,9 @@ import java.util.stream.Stream; public class DistroTestPlugin implements Plugin { - private static final String SYSTEM_JDK_VERSION = "11.0.20+8"; + private static final String SYSTEM_JDK_VERSION = "17.0.9+9"; private static final String SYSTEM_JDK_VENDOR = "adoptium"; - private static final String GRADLE_JDK_VERSION = "17.0.8+7"; + private static final String GRADLE_JDK_VERSION = "17.0.9+9"; private static final String GRADLE_JDK_VENDOR = "adoptium"; // all distributions used by distro tests. this is temporary until tests are per distribution diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 96d398c35851d..0d98cba35448f 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -2,9 +2,7 @@ opensearch = 3.0.0 lucene = 9.8.0 bundled_jdk_vendor = adoptium -bundled_jdk = 20.0.2+9 -# See please https://github.com/adoptium/temurin-build/issues/3371 -bundled_jdk_linux_ppc64le = 20+36 +bundled_jdk = 21.0.1+12 # optional dependencies spatial4j = 0.7 diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 952110c6c0289..1a0abcbaf9c88 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -81,7 +81,7 @@ ${error.file} # JDK 20+ Incubating Vector Module for SIMD optimizations; # disabling may reduce performance on vector optimized lucene -20:--add-modules=jdk.incubator.vector +20-:--add-modules=jdk.incubator.vector # HDFS ForkJoinPool.common() support by SecurityManager -Djava.util.concurrent.ForkJoinPool.common.threadFactory=org.opensearch.secure_sm.SecuredForkJoinWorkerThreadFactory From 7c917c5628d8c33a7316bd30e7b8c5c70419f8a1 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Mon, 30 Oct 2023 19:06:51 +0530 Subject: [PATCH 108/178] Log more info with max seq no on upload to and download from remote translog (#10973) * Log more info with max seq no on upload to and download from remote translog Signed-off-by: Sachin Kale * Change log level to debug Signed-off-by: Sachin Kale * Spotless fixes Signed-off-by: Sachin Kale * Fix UTs Signed-off-by: Sachin Kale --------- Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale --- .../index/translog/RemoteFsTranslog.java | 31 +++++++++++++------ 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index a305a774f5854..65d16e213cad1 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -103,6 +103,7 @@ public RemoteFsTranslog( try { download(translogTransferManager, location, logger); 
Checkpoint checkpoint = readCheckpoint(location); + logger.info("Downloaded data from remote translog till maxSeqNo = {}", checkpoint.maxSeqNo); this.readers.addAll(recoverFromFiles(checkpoint)); if (readers.isEmpty()) { String errorMsg = String.format(Locale.ROOT, "%s at least one reader must be recovered", shardId); @@ -266,9 +267,13 @@ public void rollGeneration() throws IOException { } private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOException { + long maxSeqNo = -1; try (Releasable ignored = writeLock.acquire()) { if (generation == null || generation == current.getGeneration()) { try { + if (closed.get() == false) { + maxSeqNo = getMaxSeqNo(); + } final TranslogReader reader = current.closeIntoReader(); readers.add(reader); copyCheckpointTo(location.resolve(getCommitCheckpointFileName(current.getGeneration()))); @@ -300,17 +305,17 @@ private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOExc // is not updated in remote translog except in primary to primary recovery. if (generation == null) { if (closed.get() == false) { - return upload(primaryTerm, current.getGeneration() - 1); + return upload(primaryTerm, current.getGeneration() - 1, maxSeqNo); } else { - return upload(primaryTerm, current.getGeneration()); + return upload(primaryTerm, current.getGeneration(), maxSeqNo); } } else { - return upload(primaryTerm, generation); + return upload(primaryTerm, generation, maxSeqNo); } } } - private boolean upload(Long primaryTerm, Long generation) throws IOException { + private boolean upload(long primaryTerm, long generation, long maxSeqNo) throws IOException { // During primary relocation (primary-primary peer recovery), both the old and the new primary have engine // created with the RemoteFsTranslog. Both primaries are equipped to upload the translogs. The primary mode check // below ensures that the real primary only is uploading. 
Before the primary mode is set as true for the new @@ -334,7 +339,7 @@ private boolean upload(Long primaryTerm, Long generation) throws IOException { ) { return translogTransferManager.transferSnapshot( transferSnapshotProvider, - new RemoteFsTranslogTransferListener(generation, primaryTerm) + new RemoteFsTranslogTransferListener(generation, primaryTerm, maxSeqNo) ); } @@ -522,23 +527,31 @@ private class RemoteFsTranslogTransferListener implements TranslogTransferListen /** * Generation for the translog */ - private final Long generation; + private final long generation; /** * Primary Term for the translog */ - private final Long primaryTerm; + private final long primaryTerm; + + private final long maxSeqNo; - RemoteFsTranslogTransferListener(Long generation, Long primaryTerm) { + RemoteFsTranslogTransferListener(long generation, long primaryTerm, long maxSeqNo) { this.generation = generation; this.primaryTerm = primaryTerm; + this.maxSeqNo = maxSeqNo; } @Override public void onUploadComplete(TransferSnapshot transferSnapshot) throws IOException { maxRemoteTranslogGenerationUploaded = generation; minRemoteGenReferenced = getMinFileGeneration(); - logger.trace("uploaded translog for {} {} ", primaryTerm, generation); + logger.debug( + "Successfully uploaded translog for primary term = {}, generation = {}, maxSeqNo = {}", + primaryTerm, + generation, + maxSeqNo + ); } @Override From 448635f77855108afedfe6f2e5c07a2f6c37746c Mon Sep 17 00:00:00 2001 From: Chaitanya Gohel <104654647+gashutos@users.noreply.github.com> Date: Mon, 30 Oct 2023 20:51:16 +0530 Subject: [PATCH 109/178] Disable sort optimization for HALF_FLOAT (#10999) Signed-off-by: Chaitanya Gohel --- CHANGELOG.md | 1 + .../org/opensearch/index/fielddata/IndexNumericFieldData.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 020fb5bda8b8b..c18ff830f84cc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -122,6 +122,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642)) - Add telemetry tracer/metric enable flag and integ test. ([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395)) - Add instrumentation for indexing in transport bulk action and transport shard bulk action. 
([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273)) +- [BUG] Disable sort optimization for HALF_FLOAT ([#10999](https://github.com/opensearch-project/OpenSearch/pull/10999)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java index b4e90b8ab570a..6fc074fe0de95 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java @@ -242,7 +242,7 @@ private XFieldComparatorSource comparatorSource( assert !targetNumericType.isFloatingPoint(); source = new IntValuesComparatorSource(this, missingValue, sortMode, nested); } - if (targetNumericType != getNumericType()) { + if (targetNumericType != getNumericType() || getNumericType() == NumericType.HALF_FLOAT) { source.disableSkipping(); // disable skipping logic for cast of sort field } return source; From b147dc1815aaa039365c98932b4c9859a96d70a3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 12:50:42 -0400 Subject: [PATCH 110/178] Bump org.apache.logging.log4j:log4j-core from 2.21.0 to 2.21.1 in /buildSrc/src/testKit/thirdPartyAudit/sample_jars (#11000) * Bump org.apache.logging.log4j:log4j-core Bumps org.apache.logging.log4j:log4j-core from 2.21.0 to 2.21.1. --- updated-dependencies: - dependency-name: org.apache.logging.log4j:log4j-core dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 4 ++-- buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c18ff830f84cc..e388364893345 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -112,7 +112,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) - Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635)) - Bump `com.squareup.okio:okio` from 3.5.0 to 3.6.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637)) -- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.21.0 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.21.1 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) @@ -138,4 +138,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x \ No newline at end of file diff --git 
a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index 74c88e0961c9c..dca2bce94ea6d 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -17,7 +17,7 @@ repositories { } dependencies { - implementation "org.apache.logging.log4j:log4j-core:2.21.0" + implementation "org.apache.logging.log4j:log4j-core:2.21.1" } ["0.0.1", "0.0.2"].forEach { v -> From da011ba7ec35770bf4aabf1d26f8e257946fc28f Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Mon, 30 Oct 2023 10:09:07 -0700 Subject: [PATCH 111/178] Make search pipelines asynchronous (#10598) * Make search pipelines asynchronous If a search processor needs to make a call out to another service, we should not risk blocking on the transport thread. We should support async execution. Signed-off-by: Michael Froh * Compute pipelineStart before building request callback chain Also, IntelliJ suggested refactoring creation of the terminal request callback into a separate method since the existing method was really big. I liked that suggestion. Signed-off-by: Michael Froh * Rename async methods (put async at end) Signed-off-by: Michael Froh --------- Signed-off-by: Michael Froh --- CHANGELOG.md | 1 + .../action/search/TransportSearchAction.java | 58 +++-- .../opensearch/search/pipeline/Pipeline.java | 206 +++++++++++------- .../search/pipeline/PipelinedRequest.java | 9 +- .../pipeline/SearchPipelineService.java | 3 +- .../pipeline/SearchRequestProcessor.java | 27 +++ .../pipeline/SearchResponseProcessor.java | 27 +++ .../pipeline/SearchPipelineServiceTests.java | 84 +++++-- 8 files changed, 291 insertions(+), 124 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e388364893345..93b4e4e263063 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -118,6 +118,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) - Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) - Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) +- Search pipelines now support asynchronous request and response processors to avoid blocking on a transport thread ([#10598](https://github.com/opensearch-project/OpenSearch/pull/10598)) - [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) - [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642)) - Add telemetry tracer/metric enable flag and integ test. 
([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395)) diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index a6fb8453af4ff..16b7e4810b130 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -506,24 +506,51 @@ private void executeRequest( ActionListener listener; try { searchRequest = searchPipelineService.resolvePipeline(originalSearchRequest); - listener = ActionListener.wrap( - r -> originalListener.onResponse(searchRequest.transformResponse(r)), - originalListener::onFailure - ); + listener = searchRequest.transformResponseListener(originalListener); } catch (Exception e) { originalListener.onFailure(e); return; } - if (searchQueryMetricsEnabled) { - try { - searchQueryCategorizer.categorize(searchRequest.source()); - } catch (Exception e) { - logger.error("Error while trying to categorize the query.", e); + ActionListener requestTransformListener = ActionListener.wrap(sr -> { + if (searchQueryMetricsEnabled) { + try { + searchQueryCategorizer.categorize(sr.source()); + } catch (Exception e) { + logger.error("Error while trying to categorize the query.", e); + } } - } - ActionListener rewriteListener = ActionListener.wrap(source -> { + ActionListener rewriteListener = buildRewriteListener( + sr, + task, + timeProvider, + searchAsyncActionProvider, + listener, + searchRequestOperationsListener + ); + if (sr.source() == null) { + rewriteListener.onResponse(sr.source()); + } else { + Rewriteable.rewriteAndFetch( + sr.source(), + searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), + rewriteListener + ); + } + }, listener::onFailure); + searchRequest.transformRequest(requestTransformListener); + } + + private ActionListener buildRewriteListener( + SearchRequest searchRequest, + Task task, + SearchTimeProvider timeProvider, + SearchAsyncActionProvider searchAsyncActionProvider, + ActionListener listener, + SearchRequestOperationsListener searchRequestOperationsListener + ) { + return ActionListener.wrap(source -> { if (source != searchRequest.source()) { // only set it if it changed - we don't allow null values to be set but it might be already null. 
this way we catch // situations when source is rewritten to null due to a bug @@ -634,15 +661,6 @@ private void executeRequest( } } }, listener::onFailure); - if (searchRequest.source() == null) { - rewriteListener.onResponse(searchRequest.source()); - } else { - Rewriteable.rewriteAndFetch( - searchRequest.source(), - searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), - rewriteListener - ); - } } static boolean shouldMinimizeRoundtrips(SearchRequest searchRequest) { diff --git a/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java b/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java index d4292b85b20a5..8bab961423f91 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java +++ b/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java @@ -16,11 +16,13 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.search.SearchPhaseResult; +import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.concurrent.TimeUnit; @@ -117,92 +119,138 @@ protected void afterResponseProcessor(Processor processor, long timeInNanos) {} protected void onResponseProcessorFailed(Processor processor) {} - SearchRequest transformRequest(SearchRequest request) throws SearchPipelineProcessingException { - if (searchRequestProcessors.isEmpty() == false) { - long pipelineStart = relativeTimeSupplier.getAsLong(); - beforeTransformRequest(); - try { - try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { - request.writeTo(bytesStreamOutput); - try (StreamInput in = bytesStreamOutput.bytes().streamInput()) { - try (StreamInput input = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry)) { - request = new SearchRequest(input); - } - } - } - for (SearchRequestProcessor processor : searchRequestProcessors) { - beforeRequestProcessor(processor); - long start = relativeTimeSupplier.getAsLong(); - try { - request = processor.processRequest(request); - } catch (Exception e) { - onRequestProcessorFailed(processor); - if (processor.isIgnoreFailure()) { - logger.warn( - "The exception from request processor [" - + processor.getType() - + "] in the search pipeline [" - + id - + "] was ignored", - e - ); - } else { - throw e; - } - } finally { - long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); - afterRequestProcessor(processor, took); - } + void transformRequest(SearchRequest request, ActionListener requestListener) throws SearchPipelineProcessingException { + if (searchRequestProcessors.isEmpty()) { + requestListener.onResponse(request); + return; + } + + try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { + request.writeTo(bytesStreamOutput); + try (StreamInput in = bytesStreamOutput.bytes().streamInput()) { + try (StreamInput input = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry)) { + request = new SearchRequest(input); } - } catch (Exception e) { - onTransformRequestFailure(); - throw new SearchPipelineProcessingException(e); - } finally { - long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); - 
afterTransformRequest(took); } + } catch (IOException e) { + requestListener.onFailure(new SearchPipelineProcessingException(e)); + return; } - return request; + + ActionListener finalListener = getTerminalSearchRequestActionListener(requestListener); + + // Chain listeners back-to-front + ActionListener currentListener = finalListener; + for (int i = searchRequestProcessors.size() - 1; i >= 0; i--) { + final ActionListener nextListener = currentListener; + SearchRequestProcessor processor = searchRequestProcessors.get(i); + currentListener = ActionListener.wrap(r -> { + long start = relativeTimeSupplier.getAsLong(); + beforeRequestProcessor(processor); + processor.processRequestAsync(r, ActionListener.wrap(rr -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterRequestProcessor(processor, took); + nextListener.onResponse(rr); + }, e -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterRequestProcessor(processor, took); + onRequestProcessorFailed(processor); + if (processor.isIgnoreFailure()) { + logger.warn( + "The exception from request processor [" + + processor.getType() + + "] in the search pipeline [" + + id + + "] was ignored", + e + ); + nextListener.onResponse(r); + } else { + nextListener.onFailure(new SearchPipelineProcessingException(e)); + } + })); + }, finalListener::onFailure); + } + + beforeTransformRequest(); + currentListener.onResponse(request); } - SearchResponse transformResponse(SearchRequest request, SearchResponse response) throws SearchPipelineProcessingException { - if (searchResponseProcessors.isEmpty() == false) { - long pipelineStart = relativeTimeSupplier.getAsLong(); - beforeTransformResponse(); - try { - for (SearchResponseProcessor processor : searchResponseProcessors) { - beforeResponseProcessor(processor); - long start = relativeTimeSupplier.getAsLong(); - try { - response = processor.processResponse(request, response); - } catch (Exception e) { - onResponseProcessorFailed(processor); - if (processor.isIgnoreFailure()) { - logger.warn( - "The exception from response processor [" - + processor.getType() - + "] in the search pipeline [" - + id - + "] was ignored", - e - ); - } else { - throw e; - } - } finally { - long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); - afterResponseProcessor(processor, took); + private ActionListener getTerminalSearchRequestActionListener(ActionListener requestListener) { + final long pipelineStart = relativeTimeSupplier.getAsLong(); + + return ActionListener.wrap(r -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); + afterTransformRequest(took); + requestListener.onResponse(new PipelinedRequest(this, r)); + }, e -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); + afterTransformRequest(took); + onTransformRequestFailure(); + requestListener.onFailure(new SearchPipelineProcessingException(e)); + }); + } + + ActionListener transformResponseListener(SearchRequest request, ActionListener responseListener) { + if (searchResponseProcessors.isEmpty()) { + // No response transformation necessary + return responseListener; + } + + long[] pipelineStart = new long[1]; + + final ActionListener originalListener = responseListener; + responseListener = ActionListener.wrap(r -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart[0]); + afterTransformResponse(took); + 
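The request chain above is built back-to-front: the terminal listener is created first, and each processor wraps the listener of the processor after it, so firing the outermost listener walks the pipeline in declaration order. A minimal, self-contained sketch of that wiring, using a hypothetical Stage type rather than the real SearchRequestProcessor API:

import java.util.List;
import java.util.function.Consumer;

interface Stage<T> {
    void apply(T value, Consumer<T> onSuccess, Consumer<Exception> onFailure);
}

final class ListenerChain {
    // Compose stages back-to-front; invoking the returned callback runs them in order.
    static <T> Consumer<T> compose(List<Stage<T>> stages, Consumer<T> done, Consumer<Exception> failed) {
        Consumer<T> next = done;
        for (int i = stages.size() - 1; i >= 0; i--) {
            final Stage<T> stage = stages.get(i);
            final Consumer<T> downstream = next; // effectively-final capture for the lambda
            next = value -> stage.apply(value, downstream, failed);
        }
        return next;
    }
}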
originalListener.onResponse(r); + }, e -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart[0]); + afterTransformResponse(took); + onTransformResponseFailure(); + originalListener.onFailure(e); + }); + ActionListener finalListener = responseListener; // Jump directly to this one on exception. + + for (int i = searchResponseProcessors.size() - 1; i >= 0; i--) { + final ActionListener currentFinalListener = responseListener; + final SearchResponseProcessor processor = searchResponseProcessors.get(i); + + responseListener = ActionListener.wrap(r -> { + beforeResponseProcessor(processor); + final long start = relativeTimeSupplier.getAsLong(); + processor.processResponseAsync(request, r, ActionListener.wrap(rr -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterResponseProcessor(processor, took); + currentFinalListener.onResponse(rr); + }, e -> { + onResponseProcessorFailed(processor); + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterResponseProcessor(processor, took); + if (processor.isIgnoreFailure()) { + logger.warn( + "The exception from response processor [" + + processor.getType() + + "] in the search pipeline [" + + id + + "] was ignored", + e + ); + // Pass the previous response through to the next processor in the chain + currentFinalListener.onResponse(r); + } else { + currentFinalListener.onFailure(new SearchPipelineProcessingException(e)); } - } - } catch (Exception e) { - onTransformResponseFailure(); - throw new SearchPipelineProcessingException(e); - } finally { - long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); - afterTransformResponse(took); - } + })); + }, finalListener::onFailure); } - return response; + final ActionListener chainListener = responseListener; + return ActionListener.wrap(r -> { + beforeTransformResponse(); + pipelineStart[0] = relativeTimeSupplier.getAsLong(); + chainListener.onResponse(r); + }, originalListener::onFailure); + } void runSearchPhaseResultsTransformer( diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java b/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java index 5a7539808c127..77dfc6bcd4fc5 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java @@ -12,6 +12,7 @@ import org.opensearch.action.search.SearchPhaseResults; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; +import org.opensearch.core.action.ActionListener; import org.opensearch.search.SearchPhaseResult; /** @@ -27,8 +28,12 @@ public final class PipelinedRequest extends SearchRequest { this.pipeline = pipeline; } - public SearchResponse transformResponse(SearchResponse response) { - return pipeline.transformResponse(this, response); + public void transformRequest(ActionListener requestListener) { + pipeline.transformRequest(this, requestListener); + } + + public ActionListener transformResponseListener(ActionListener responseListener) { + return pipeline.transformResponseListener(this, responseListener); } public void transformSearchPhaseResults( diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java index 739101519ff98..580fe1b7c4216 100644 --- 
a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java @@ -408,8 +408,7 @@ public PipelinedRequest resolvePipeline(SearchRequest searchRequest) { pipeline = pipelineHolder.pipeline; } } - SearchRequest transformedRequest = pipeline.transformRequest(searchRequest); - return new PipelinedRequest(pipeline, transformedRequest); + return new PipelinedRequest(pipeline, searchRequest); } Map> getRequestProcessorFactories() { diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java b/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java index c236cde1a5cc0..427c9e4ab694c 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java @@ -9,10 +9,37 @@ package org.opensearch.search.pipeline; import org.opensearch.action.search.SearchRequest; +import org.opensearch.core.action.ActionListener; /** * Interface for a search pipeline processor that modifies a search request. */ public interface SearchRequestProcessor extends Processor { + + /** + * Transform a {@link SearchRequest}. Executed on the coordinator node before any {@link org.opensearch.action.search.SearchPhase} + * executes. + *
<p>
+ * Implement this method if the processor makes no asynchronous calls. + * @param request the executed {@link SearchRequest} + * @return a new {@link SearchRequest} (or the input {@link SearchRequest} if no changes) + * @throws Exception if an error occurs during processing + */ SearchRequest processRequest(SearchRequest request) throws Exception; + + /** + * Transform a {@link SearchRequest}. Executed on the coordinator node before any {@link org.opensearch.action.search.SearchPhase} + * executes. + *
<p>
+ * Expert method: Implement this if the processor needs to make asynchronous calls. Otherwise, implement processRequest. + * @param request the executed {@link SearchRequest} + * @param requestListener callback to be invoked on successful processing or on failure + */ + default void processRequestAsync(SearchRequest request, ActionListener requestListener) { + try { + requestListener.onResponse(processRequest(request)); + } catch (Exception e) { + requestListener.onFailure(e); + } + } } diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java b/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java index 2f22cedb9b5c0..21136ce208fee 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java @@ -10,10 +10,37 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; +import org.opensearch.core.action.ActionListener; /** * Interface for a search pipeline processor that modifies a search response. */ public interface SearchResponseProcessor extends Processor { + + /** + * Transform a {@link SearchResponse}, possibly based on the executed {@link SearchRequest}. + *
<p>
+ * Implement this method if the processor makes no asynchronous calls. + * @param request the executed {@link SearchRequest} + * @param response the current {@link SearchResponse}, possibly modified by earlier processors + * @return a modified {@link SearchResponse} (or the input {@link SearchResponse} if no changes) + * @throws Exception if an error occurs during processing + */ SearchResponse processResponse(SearchRequest request, SearchResponse response) throws Exception; + + /** + * Transform a {@link SearchResponse}, possibly based on the executed {@link SearchRequest}. + *
<p>
+ * Expert method: Implement this if the processor needs to make asynchronous calls. Otherwise, implement processResponse. + * @param request the executed {@link SearchRequest} + * @param response the current {@link SearchResponse}, possibly modified by earlier processors + * @param responseListener callback to be invoked on successful processing or on failure + */ + default void processResponseAsync(SearchRequest request, SearchResponse response, ActionListener responseListener) { + try { + responseListener.onResponse(processResponse(request, response)); + } catch (Exception e) { + responseListener.onFailure(e); + } + } } diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java index d44bd3831281f..98d2a7e84d672 100644 --- a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java +++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java @@ -41,6 +41,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.core.common.bytes.BytesArray; @@ -194,7 +195,7 @@ public void testResolveIndexDefaultPipeline() throws Exception { service.applyClusterState(cce); SearchRequest searchRequest = new SearchRequest("my_index").source(SearchSourceBuilder.searchSource().size(5)); - PipelinedRequest pipelinedRequest = service.resolvePipeline(searchRequest); + PipelinedRequest pipelinedRequest = syncTransformRequest(service.resolvePipeline(searchRequest)); assertEquals("p1", pipelinedRequest.getPipeline().getId()); assertEquals(10, pipelinedRequest.source().size()); @@ -597,7 +598,7 @@ public void testTransformRequest() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(new TermQueryBuilder("foo", "bar")).size(size); SearchRequest request = new SearchRequest("_index").source(sourceBuilder).pipeline("p1"); - PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(request); + PipelinedRequest pipelinedRequest = syncTransformRequest(searchPipelineService.resolvePipeline(request)); assertEquals(2 * size, pipelinedRequest.source().size()); assertEquals(size, request.source().size()); @@ -641,19 +642,57 @@ public void testTransformResponse() throws Exception { // First try without specifying a pipeline, which should be a no-op. 
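These default methods mean existing synchronous processors keep working unchanged while the pipeline itself becomes callback-driven. The underlying sync-to-async adapter, reduced to a self-contained sketch (SyncStep is an illustrative stand-in, not an OpenSearch type):

import java.util.function.Consumer;

interface SyncStep<T> {
    // Synchronous form: implement this when no async calls are needed.
    T run(T input) throws Exception;

    // Async form: by default, delegate to the sync method and route the result
    // or any thrown exception to the callbacks, exactly once.
    default void runAsync(T input, Consumer<T> onSuccess, Consumer<Exception> onFailure) {
        try {
            onSuccess.accept(run(input));
        } catch (Exception e) {
            onFailure.accept(e);
        }
    }
}

Since SyncStep has a single abstract method, a lambda such as SyncStep<String> upper = s -> s.toUpperCase() satisfies it and gets runAsync for free.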
SearchRequest searchRequest = new SearchRequest(); PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest); - SearchResponse notTransformedResponse = pipelinedRequest.transformResponse(searchResponse); + SearchResponse notTransformedResponse = syncTransformResponse(pipelinedRequest, searchResponse); assertSame(searchResponse, notTransformedResponse); // Now apply a pipeline searchRequest = new SearchRequest().pipeline("p1"); pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest); - SearchResponse transformedResponse = pipelinedRequest.transformResponse(searchResponse); + SearchResponse transformedResponse = syncTransformResponse(pipelinedRequest, searchResponse); assertEquals(size, transformedResponse.getHits().getHits().length); for (int i = 0; i < size; i++) { assertEquals(2.0, transformedResponse.getHits().getHits()[i].getScore(), 0.0001f); } } + /** + * Helper to synchronously apply a response pipeline, returning the transformed response. + */ + private static SearchResponse syncTransformResponse(PipelinedRequest pipelinedRequest, SearchResponse searchResponse) throws Exception { + SearchResponse[] responseBox = new SearchResponse[1]; + Exception[] exceptionBox = new Exception[1]; + ActionListener responseListener = pipelinedRequest.transformResponseListener(ActionListener.wrap(r -> { + responseBox[0] = r; + }, e -> { exceptionBox[0] = e; })); + responseListener.onResponse(searchResponse); + + if (exceptionBox[0] != null) { + throw exceptionBox[0]; + } + return responseBox[0]; + } + + /** + * Helper to synchronously apply a request pipeline, returning the transformed request. + */ + private static PipelinedRequest syncTransformRequest(PipelinedRequest request) throws Exception { + PipelinedRequest[] requestBox = new PipelinedRequest[1]; + Exception[] exceptionBox = new Exception[1]; + + request.transformRequest(ActionListener.wrap(r -> requestBox[0] = (PipelinedRequest) r, e -> exceptionBox[0] = e)); + if (exceptionBox[0] != null) { + throw exceptionBox[0]; + } + return requestBox[0]; + } + + /** + * Helper to synchronously apply a request pipeline and response pipeline, returning the transformed response. 
+ */ + private static SearchResponse syncExecutePipeline(PipelinedRequest request, SearchResponse response) throws Exception { + return syncTransformResponse(syncTransformRequest(request), response); + } + public void testTransformSearchPhase() { SearchPipelineService searchPipelineService = createWithProcessors(); SearchPipelineMetadata metadata = new SearchPipelineMetadata( @@ -875,7 +914,7 @@ public void testInlinePipeline() throws Exception { SearchRequest searchRequest = new SearchRequest().source(sourceBuilder); // Verify pipeline - PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest); + PipelinedRequest pipelinedRequest = syncTransformRequest(searchPipelineService.resolvePipeline(searchRequest)); Pipeline pipeline = pipelinedRequest.getPipeline(); assertEquals(SearchPipelineService.AD_HOC_PIPELINE_ID, pipeline.getId()); assertEquals(1, pipeline.getSearchRequestProcessors().size()); @@ -894,7 +933,7 @@ public void testInlinePipeline() throws Exception { SearchResponseSections searchResponseSections = new SearchResponseSections(searchHits, null, null, false, false, null, 0); SearchResponse searchResponse = new SearchResponse(searchResponseSections, null, 1, 1, 0, 10, null, null); - SearchResponse transformedResponse = pipeline.transformResponse(searchRequest, searchResponse); + SearchResponse transformedResponse = syncTransformResponse(pipelinedRequest, searchResponse); for (int i = 0; i < size; i++) { assertEquals(2.0, transformedResponse.getHits().getHits()[i].getScore(), 0.0001); } @@ -946,7 +985,10 @@ public void testExceptionOnRequestProcessing() { SearchRequest searchRequest = new SearchRequest().source(sourceBuilder); // Exception thrown when processing the request - expectThrows(SearchPipelineProcessingException.class, () -> searchPipelineService.resolvePipeline(searchRequest)); + expectThrows( + SearchPipelineProcessingException.class, + () -> syncTransformRequest(searchPipelineService.resolvePipeline(searchRequest)) + ); } public void testExceptionOnResponseProcessing() throws Exception { @@ -974,10 +1016,10 @@ public void testExceptionOnResponseProcessing() throws Exception { SearchResponse response = new SearchResponse(null, null, 0, 0, 0, 0, null, null); // Exception thrown when processing response - expectThrows(SearchPipelineProcessingException.class, () -> pipelinedRequest.transformResponse(response)); + expectThrows(SearchPipelineProcessingException.class, () -> syncTransformResponse(pipelinedRequest, response)); } - public void testCatchExceptionOnRequestProcessing() throws IllegalAccessException { + public void testCatchExceptionOnRequestProcessing() throws Exception { SearchRequestProcessor throwingRequestProcessor = new FakeRequestProcessor("throwing_request", null, null, true, r -> { throw new RuntimeException(); }); @@ -1008,7 +1050,7 @@ public void testCatchExceptionOnRequestProcessing() throws IllegalAccessExceptio "The exception from request processor [throwing_request] in the search pipeline [_ad_hoc_pipeline] was ignored" ) ); - PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest); + syncTransformRequest(searchPipelineService.resolvePipeline(searchRequest)); mockAppender.assertAllExpectationsMatched(); } } @@ -1048,7 +1090,7 @@ public void testCatchExceptionOnResponseProcessing() throws Exception { "The exception from response processor [throwing_response] in the search pipeline [_ad_hoc_pipeline] was ignored" ) ); - pipelinedRequest.transformResponse(response); + 
syncTransformResponse(pipelinedRequest, response); mockAppender.assertAllExpectationsMatched(); } } @@ -1078,15 +1120,15 @@ public void testStats() throws Exception { SearchRequest request = new SearchRequest(); SearchResponse response = new SearchResponse(null, null, 0, 0, 0, 0, null, null); - searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline")).transformResponse(response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline")), response); expectThrows( SearchPipelineProcessingException.class, - () -> searchPipelineService.resolvePipeline(request.pipeline("bad_request_pipeline")).transformResponse(response) + () -> syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("bad_request_pipeline")), response) ); - searchPipelineService.resolvePipeline(request.pipeline("good_response_pipeline")).transformResponse(response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("good_response_pipeline")), response); expectThrows( SearchPipelineProcessingException.class, - () -> searchPipelineService.resolvePipeline(request.pipeline("bad_response_pipeline")).transformResponse(response) + () -> syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("bad_response_pipeline")), response) ); SearchPipelineStats stats = searchPipelineService.stats(); @@ -1164,12 +1206,12 @@ public void testStatsEnabledIgnoreFailure() throws Exception { SearchRequest request = new SearchRequest(); SearchResponse response = new SearchResponse(null, null, 0, 0, 0, 0, null, null); - searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline")).transformResponse(response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline")), response); // Caught Exception here - searchPipelineService.resolvePipeline(request.pipeline("bad_request_pipeline")).transformResponse(response); - searchPipelineService.resolvePipeline(request.pipeline("good_response_pipeline")).transformResponse(response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("bad_request_pipeline")), response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("good_response_pipeline")), response); // Caught Exception here - searchPipelineService.resolvePipeline(request.pipeline("bad_response_pipeline")).transformResponse(response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("bad_response_pipeline")), response); // when ignoreFailure enabled, the search pipelines will all succeed. 
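The syncTransform* helpers above work because these unit tests complete every processor on the calling thread, so a one-element box can carry the callback's result or failure back out. The same drain pattern in isolation (AsyncCall and SyncDrain are illustrative stand-ins):

import java.util.function.Consumer;

final class SyncDrain {
    interface AsyncCall<T> {
        void run(Consumer<T> onSuccess, Consumer<Exception> onFailure);
    }

    // Run the call and return its result, assuming the callback fires on this thread.
    static <T> T drain(AsyncCall<T> call) throws Exception {
        Object[] resultBox = new Object[1];
        Exception[] exceptionBox = new Exception[1];
        call.run(r -> resultBox[0] = r, e -> exceptionBox[0] = e);
        if (exceptionBox[0] != null) {
            throw exceptionBox[0];
        }
        @SuppressWarnings("unchecked")
        T result = (T) resultBox[0];
        return result;
    }
}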
SearchPipelineStats stats = searchPipelineService.stats(); @@ -1273,8 +1315,8 @@ private SearchPipelineService getSearchPipelineService( } private static void assertPipelineStats(OperationStats stats, long count, long failed) { - assertEquals(stats.getCount(), count); - assertEquals(stats.getFailedCount(), failed); + assertEquals(count, stats.getCount()); + assertEquals(failed, stats.getFailedCount()); } public void testAdHocRejectingProcessor() { From 9d85e566894ef53e5f2093618b3d455e4d0a04ce Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Tue, 31 Oct 2023 02:31:36 +0800 Subject: [PATCH 112/178] Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor (#10737) * Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor Signed-off-by: Gao Binlong * modify change log Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- CHANGELOG.md | 1 + .../ingest/common/DotExpanderProcessor.java | 16 +++------------- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 93b4e4e263063..1d0c30d9812c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -135,6 +135,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) - Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) - Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) +- Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor ([#10737](https://github.com/opensearch-project/OpenSearch/pull/10737)) ### Security diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/DotExpanderProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/DotExpanderProcessor.java index 39c2d67ac0b85..0eab6334854ab 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/DotExpanderProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/DotExpanderProcessor.java @@ -118,25 +118,15 @@ public Processor create( ) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, tag, config, "field"); if (field.contains(".") == false) { - throw ConfigurationUtils.newConfigurationException( - ConfigurationUtils.TAG_KEY, - tag, - "field", - "field does not contain a dot" - ); + throw ConfigurationUtils.newConfigurationException(TYPE, tag, "field", "field does not contain a dot"); } if (field.indexOf('.') == 0 || field.lastIndexOf('.') == field.length() - 1) { - throw ConfigurationUtils.newConfigurationException( - ConfigurationUtils.TAG_KEY, - tag, - "field", - "Field can't start or end with a dot" - ); + throw ConfigurationUtils.newConfigurationException(TYPE, tag, "field", "Field can't start or end with a dot"); } int firstIndex = -1; for (int index = field.indexOf('.'); index != -1; index = field.indexOf('.', index + 1)) { if (index - firstIndex == 1) { - throw ConfigurationUtils.newConfigurationException(ConfigurationUtils.TAG_KEY, tag, "field", "No space between dots"); + throw ConfigurationUtils.newConfigurationException(TYPE, tag, "field", "No space between dots"); } firstIndex = index; } From 
9c65350481e1ec14c00fee4cccf6e92c98da0d2c Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Mon, 30 Oct 2023 19:09:31 -0700 Subject: [PATCH 113/178] [Segment Replication] Remove primary targets from replication tracker (#11011) Signed-off-by: Suraj Singh --- .../index/seqno/ReplicationTracker.java | 12 +++ .../index/seqno/ReplicationTrackerTests.java | 80 +++++++++++++++++++ 2 files changed, 92 insertions(+) diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 94f376d923689..352167597fa81 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -71,6 +71,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.OptionalLong; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; @@ -1227,6 +1228,14 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() { return this.latestReplicationCheckpoint; } + private boolean isPrimaryRelocation(String allocationId) { + Optional shardRouting = routingTable.shards() + .stream() + .filter(routing -> routing.allocationId().getId().equals(allocationId)) + .findAny(); + return shardRouting.isPresent() && shardRouting.get().primary(); + } + private void createReplicationLagTimers() { for (Map.Entry entry : checkpoints.entrySet()) { final String allocationId = entry.getKey(); @@ -1236,6 +1245,7 @@ private void createReplicationLagTimers() { // it is possible for a shard to be in-sync but not yet removed from the checkpoints collection after a failover event. if (cps.inSync && replicationGroup.getUnavailableInSyncShards().contains(allocationId) == false + && isPrimaryRelocation(allocationId) == false && latestReplicationCheckpoint.isAheadOf(cps.visibleReplicationCheckpoint)) { cps.checkpointTimers.computeIfAbsent(latestReplicationCheckpoint, ignored -> new SegmentReplicationLagTimer()); logger.trace( @@ -1267,6 +1277,7 @@ public synchronized void startReplicationLagTimers(ReplicationCheckpoint checkpo final CheckpointState cps = e.getValue(); if (cps.inSync && replicationGroup.getUnavailableInSyncShards().contains(allocationId) == false + && isPrimaryRelocation(e.getKey()) == false && latestReplicationCheckpoint.isAheadOf(cps.visibleReplicationCheckpoint) && cps.checkpointTimers.containsKey(latestReplicationCheckpoint)) { cps.checkpointTimers.get(latestReplicationCheckpoint).start(); @@ -1291,6 +1302,7 @@ public synchronized Set getSegmentReplicationStats entry -> entry.getKey().equals(this.shardAllocationId) == false && entry.getValue().inSync && replicationGroup.getUnavailableInSyncShards().contains(entry.getKey()) == false + && isPrimaryRelocation(entry.getKey()) == false ) .map(entry -> buildShardStats(entry.getKey(), entry.getValue())) .collect(Collectors.toUnmodifiableSet()); diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java index 28c95ddf13fc4..7971591e82bab 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java @@ -1907,6 +1907,86 @@ public void testSegmentReplicationCheckpointTracking() { } } + public void testSegmentReplicationCheckpointForRelocatingPrimary() { + Settings settings = 
Settings.builder().put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); + final long initialClusterStateVersion = randomNonNegativeLong(); + final int numberOfActiveAllocationsIds = randomIntBetween(2, 2); + final int numberOfInitializingIds = randomIntBetween(2, 2); + final Tuple, Set> activeAndInitializingAllocationIds = randomActiveAndInitializingAllocationIds( + numberOfActiveAllocationsIds, + numberOfInitializingIds + ); + final Set activeAllocationIds = activeAndInitializingAllocationIds.v1(); + final Set initializingIds = activeAndInitializingAllocationIds.v2(); + + AllocationId targetAllocationId = initializingIds.iterator().next(); + AllocationId primaryId = activeAllocationIds.iterator().next(); + String relocatingToNodeId = nodeIdFromAllocationId(targetAllocationId); + + logger.info("--> activeAllocationIds {} Primary {}", activeAllocationIds, primaryId.getId()); + logger.info("--> initializingIds {} Target {}", initializingIds, targetAllocationId); + + final ShardId shardId = new ShardId("test", "_na_", 0); + final IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); + for (final AllocationId initializingId : initializingIds) { + boolean primaryRelocationTarget = initializingId.equals(targetAllocationId); + builder.addShard( + TestShardRouting.newShardRouting( + shardId, + nodeIdFromAllocationId(initializingId), + null, + primaryRelocationTarget, + ShardRoutingState.INITIALIZING, + initializingId + ) + ); + } + builder.addShard( + TestShardRouting.newShardRouting( + shardId, + nodeIdFromAllocationId(primaryId), + relocatingToNodeId, + true, + ShardRoutingState.STARTED, + primaryId + ) + ); + IndexShardRoutingTable routingTable = builder.build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(initialClusterStateVersion, ids(activeAllocationIds), routingTable); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + assertThat(tracker.getReplicationGroup().getInSyncAllocationIds(), equalTo(ids(activeAllocationIds))); + assertThat(tracker.getReplicationGroup().getRoutingTable(), equalTo(routingTable)); + assertTrue(activeAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + initializingIds.forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED)); + + final StoreFileMetadata segment_1 = new StoreFileMetadata("segment_1", 5L, "abcd", Version.LATEST); + final ReplicationCheckpoint initialCheckpoint = new ReplicationCheckpoint( + tracker.shardId(), + 0L, + 1, + 1, + 5L, + Codec.getDefault().getName(), + Map.of("segment_1", segment_1) + ); + tracker.setLatestReplicationCheckpoint(initialCheckpoint); + tracker.startReplicationLagTimers(initialCheckpoint); + + final Set expectedIds = initializingIds.stream() + .filter(id -> id.equals(targetAllocationId)) + .map(AllocationId::getId) + .collect(Collectors.toSet()); + + Set groupStats = tracker.getSegmentReplicationStats(); + assertEquals(expectedIds.size(), groupStats.size()); + for (SegmentReplicationShardStats shardStat : groupStats) { + assertEquals(1, shardStat.getCheckpointsBehindCount()); + assertEquals(5L, shardStat.getBytesBehindCount()); + assertTrue(shardStat.getCurrentReplicationLagMillis() >= shardStat.getCurrentReplicationTimeMillis()); + } + } + public void testSegmentReplicationCheckpointTrackingInvalidAllocationIDs() { Settings settings = Settings.builder().put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); final long 
initialClusterStateVersion = randomNonNegativeLong(); From 63aff16c0d08bac44e1d1a6158ee3f838a043074 Mon Sep 17 00:00:00 2001 From: Harsha Vamsi Kalluri Date: Mon, 30 Oct 2023 22:10:36 -0700 Subject: [PATCH 114/178] Adds various Query overrides to Keyword Field (#10425) The keywordfield mapper provides access to various query types, e.g. the termsQuery, fuzzyQuery. These are inherited as is from the StringType. But we do not take into account the fact that keyword fields can have doc_values enabled. This PR adds the ability for various queries to first check if doc_values are enabled and if so out-source the work to lucene to decide if it's better to use index values or doc_values when running queries. Signed-off-by: Harsha Vamsi Kalluri --- CHANGELOG.md | 1 + .../test/search/340_keyword_doc_values.yml | 46 ++++ .../index/mapper/KeywordFieldMapper.java | 238 +++++++++++++++++- .../index/mapper/MappedFieldType.java | 24 ++ .../subphase/highlight/CustomQueryScorer.java | 3 + .../index/mapper/KeywordFieldTypeTests.java | 144 ++++++++++- .../query/MultiMatchQueryBuilderTests.java | 8 +- 7 files changed, 446 insertions(+), 18 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/340_keyword_doc_values.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index 1d0c30d9812c3..046693421d07b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,6 +63,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) - Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) - Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558)) +- Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) ### Deprecated diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_keyword_doc_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_keyword_doc_values.yml new file mode 100644 index 0000000000000..8829e7b100fdd --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_keyword_doc_values.yml @@ -0,0 +1,46 @@ +--- +"search on keyword fields with doc_values enabled": + - do: + indices.create: + index: test + body: + mappings: + properties: + "some_keyword": + type: "keyword" + index: true + doc_values: true + + - do: + bulk: + index: test + refresh: true + body: + - '{"index": {"_index": "test", "_id": "1" }}' + - '{ "some_keyword": "ingesting some random keyword data" }' + - '{ "index": { "_index": "test", "_id": "2" }}' + - '{ "some_keyword": "400" }' + - '{ "index": { "_index": "test", "_id": "3" } }' + - '{ "some_keyword": "5" }' + + - do: + search: + index: test + body: + query: + prefix: + some_keyword: "ing" + + - match: { hits.hits.0._source.some_keyword: "ingesting some random keyword data" } + + - do: + search: + index: test + body: + query: + range: { + "some_keyword": { + "lt": 500 + } } + + - match: { hits.total.value: 2 } diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index 92ee8067ee4a0..c14b2c92c89c3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ 
b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -38,11 +38,24 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.TermInSetQuery; +import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.Operations; +import org.opensearch.OpenSearchException; import org.opensearch.common.Nullable; +import org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.search.AutomatonQueries; +import org.opensearch.common.unit.Fuzziness; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.NamedAnalyzer; @@ -62,6 +75,8 @@ import java.util.Objects; import java.util.function.Supplier; +import static org.opensearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; + /** * A field mapper for keywords. This mapper accepts strings and indexes them as-is. * @@ -317,7 +332,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S @Override public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { if (format != null) { - throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't " + "support formats."); } return new SourceValueFetcher(name(), context, nullValue) { @@ -372,17 +387,226 @@ protected BytesRef indexedValueForSearch(Object value) { return getTextSearchInfo().getSearchAnalyzer().normalize(name(), value.toString()); } + @Override + public Query termsQuery(List values, QueryShardContext context) { + failIfNotIndexedAndNoDocValues(); + // has index and doc_values enabled + if (isSearchable() && hasDocValues()) { + BytesRef[] bytesRefs = new BytesRef[values.size()]; + for (int i = 0; i < bytesRefs.length; i++) { + bytesRefs[i] = indexedValueForSearch(values.get(i)); + } + Query indexQuery = new TermInSetQuery(name(), bytesRefs); + Query dvQuery = new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, name(), bytesRefs); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + // if we only have doc_values enabled, we construct a new query with doc_values re-written + if (hasDocValues()) { + BytesRef[] bytesRefs = new BytesRef[values.size()]; + for (int i = 0; i < bytesRefs.length; i++) { + bytesRefs[i] = indexedValueForSearch(values.get(i)); + } + return new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, name(), bytesRefs); + } + // has index enabled, we're going to return the query as is + return super.termsQuery(values, context); + } + + @Override + public Query prefixQuery( + String value, + @Nullable MultiTermQuery.RewriteMethod method, + boolean caseInsensitive, + QueryShardContext context + ) { + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[prefix] queries cannot be executed when '" + + 
ALLOW_EXPENSIVE_QUERIES.getKey() + + "' is set to false. For optimised prefix queries on text " + + "fields please enable [index_prefixes]." + ); + } + failIfNotIndexedAndNoDocValues(); + if (isSearchable() && hasDocValues()) { + Query indexQuery = super.prefixQuery(value, method, caseInsensitive, context); + Query dvQuery = super.prefixQuery(value, MultiTermQuery.DOC_VALUES_REWRITE, caseInsensitive, context); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + if (hasDocValues()) { + if (caseInsensitive) { + return AutomatonQueries.caseInsensitivePrefixQuery( + (new Term(name(), indexedValueForSearch(value))), + MultiTermQuery.DOC_VALUES_REWRITE + ); + } + return new PrefixQuery(new Term(name(), indexedValueForSearch(value)), MultiTermQuery.DOC_VALUES_REWRITE); + } + return super.prefixQuery(value, method, caseInsensitive, context); + } + + @Override + public Query regexpQuery( + String value, + int syntaxFlags, + int matchFlags, + int maxDeterminizedStates, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[regexp] queries cannot be executed when '" + ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to " + "false." + ); + } + failIfNotIndexedAndNoDocValues(); + if (isSearchable() && hasDocValues()) { + Query indexQuery = super.regexpQuery(value, syntaxFlags, matchFlags, maxDeterminizedStates, method, context); + Query dvQuery = super.regexpQuery( + value, + syntaxFlags, + matchFlags, + maxDeterminizedStates, + MultiTermQuery.DOC_VALUES_REWRITE, + context + ); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + if (hasDocValues()) { + return new RegexpQuery( + new Term(name(), indexedValueForSearch(value)), + syntaxFlags, + matchFlags, + RegexpQuery.DEFAULT_PROVIDER, + maxDeterminizedStates, + MultiTermQuery.DOC_VALUES_REWRITE + ); + } + return super.regexpQuery(value, syntaxFlags, matchFlags, maxDeterminizedStates, method, context); + } + + @Override + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[range] queries on [text] or [keyword] fields cannot be executed when '" + + ALLOW_EXPENSIVE_QUERIES.getKey() + + "' is set to false." + ); + } + failIfNotIndexedAndNoDocValues(); + if (isSearchable() && hasDocValues()) { + Query indexQuery = new TermRangeQuery( + name(), + lowerTerm == null ? null : indexedValueForSearch(lowerTerm), + upperTerm == null ? null : indexedValueForSearch(upperTerm), + includeLower, + includeUpper + ); + Query dvQuery = new TermRangeQuery( + name(), + lowerTerm == null ? null : indexedValueForSearch(lowerTerm), + upperTerm == null ? null : indexedValueForSearch(upperTerm), + includeLower, + includeUpper, + MultiTermQuery.DOC_VALUES_REWRITE + ); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + if (hasDocValues()) { + return new TermRangeQuery( + name(), + lowerTerm == null ? null : indexedValueForSearch(lowerTerm), + upperTerm == null ? null : indexedValueForSearch(upperTerm), + includeLower, + includeUpper, + MultiTermQuery.DOC_VALUES_REWRITE + ); + } + return new TermRangeQuery( + name(), + lowerTerm == null ? null : indexedValueForSearch(lowerTerm), + upperTerm == null ? 
null : indexedValueForSearch(upperTerm), + includeLower, + includeUpper + ); + } + + @Override + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean transpositions, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + failIfNotIndexedAndNoDocValues(); + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[fuzzy] queries cannot be executed when '" + ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to " + "false." + ); + } + if (isSearchable() && hasDocValues()) { + Query indexQuery = super.fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions, context); + Query dvQuery = super.fuzzyQuery( + value, + fuzziness, + prefixLength, + maxExpansions, + transpositions, + MultiTermQuery.DOC_VALUES_REWRITE, + context + ); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + if (hasDocValues()) { + return new FuzzyQuery( + new Term(name(), indexedValueForSearch(value)), + fuzziness.asDistance(BytesRefs.toString(value)), + prefixLength, + maxExpansions, + transpositions, + MultiTermQuery.DOC_VALUES_REWRITE + ); + } + return super.fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions, context); + } + @Override public Query wildcardQuery( String value, @Nullable MultiTermQuery.RewriteMethod method, - boolean caseInsensitve, + boolean caseInsensitive, QueryShardContext context ) { - // keyword field types are always normalized, so ignore case sensitivity and force normalize the wildcard + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[wildcard] queries cannot be executed when '" + ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to " + "false." + ); + } + failIfNotIndexedAndNoDocValues(); + // keyword field types are always normalized, so ignore case sensitivity and force normalize the + // wildcard // query text - return super.wildcardQuery(value, method, caseInsensitve, true, context); + if (isSearchable() && hasDocValues()) { + Query indexQuery = super.wildcardQuery(value, method, caseInsensitive, true, context); + Query dvQuery = super.wildcardQuery(value, MultiTermQuery.DOC_VALUES_REWRITE, caseInsensitive, true, context); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + if (hasDocValues()) { + Term term; + value = normalizeWildcardPattern(name(), value, getTextSearchInfo().getSearchAnalyzer()); + term = new Term(name(), value); + if (caseInsensitive) { + return AutomatonQueries.caseInsensitiveWildcardQuery(term, method); + } + return new WildcardQuery(term, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, MultiTermQuery.DOC_VALUES_REWRITE); + } + return super.wildcardQuery(value, method, caseInsensitive, true, context); } + } private final boolean indexed; @@ -422,8 +646,10 @@ protected KeywordFieldMapper( this.indexAnalyzers = builder.indexAnalyzers; } - /** Values that have more chars than the return value of this method will - * be skipped at parsing time. */ + /** + * Values that have more chars than the return value of this method will + * be skipped at parsing time. 
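The recurring pattern in this mapper is to build each query twice, once over the inverted index and once with MultiTermQuery.DOC_VALUES_REWRITE, and wrap the pair in an IndexOrDocValuesQuery so Lucene can pick the cheaper execution path per segment. A minimal sketch against the stock Lucene API (DualRangeQuery is an invented helper name):

import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.util.BytesRef;

final class DualRangeQuery {
    // Build the same range query against the postings and against doc values;
    // IndexOrDocValuesQuery lets Lucene choose between them segment by segment.
    static Query build(String field, BytesRef lower, BytesRef upper) {
        Query onIndex = new TermRangeQuery(field, lower, upper, true, true);
        Query onDocValues = new TermRangeQuery(field, lower, upper, true, true, MultiTermQuery.DOC_VALUES_REWRITE);
        return new IndexOrDocValuesQuery(onIndex, onDocValues);
    }
}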
+ */ public int ignoreAbove() { return ignoreAbove; } diff --git a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java index 997835f712038..62acad99074c2 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java @@ -269,6 +269,21 @@ public Query fuzzyQuery( ); } + // Fuzzy Query with re-write method + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean transpositions, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + throw new IllegalArgumentException( + "Can only use fuzzy queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]" + ); + } + // Case sensitive form of prefix query public final Query prefixQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) { return prefixQuery(value, method, false, context); @@ -433,6 +448,15 @@ protected final void failIfNotIndexed() { } } + protected final void failIfNotIndexedAndNoDocValues() { + // we fail if a field is both not indexed and does not have doc_values enabled + if (isIndexed == false && hasDocValues() == false) { + throw new IllegalArgumentException( + "Cannot search on field [" + name() + "] since it is both not indexed," + " and does not have doc_values enabled." + ); + } + } + public boolean eagerGlobalOrdinals() { return eagerGlobalOrdinals; } diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/CustomQueryScorer.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/CustomQueryScorer.java index d0fb0f6da53c4..89c77b3cd403f 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/CustomQueryScorer.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/CustomQueryScorer.java @@ -33,6 +33,7 @@ package org.opensearch.search.fetch.subphase.highlight; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.highlight.QueryScorer; import org.apache.lucene.search.highlight.WeightedSpanTerm; @@ -104,6 +105,8 @@ protected void extract(Query query, float boost, Map t super.extract(((FunctionScoreQuery) query).getSubQuery(), boost, terms); } else if (query instanceof OpenSearchToParentBlockJoinQuery) { super.extract(((OpenSearchToParentBlockJoinQuery) query).getChildQuery(), boost, terms); + } else if (query instanceof IndexOrDocValuesQuery) { + super.extract(((IndexOrDocValuesQuery) query).getIndexQuery(), boost, terms); } else { super.extract(query, boost, terms); } diff --git a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java index 0d8ef6784a28c..393c448330142 100644 --- a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java @@ -44,13 +44,17 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.NormsFieldExistsQuery; +import 
org.apache.lucene.search.Query; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.Operations; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -128,14 +132,29 @@ public void testTermsQuery() { List terms = new ArrayList<>(); terms.add(new BytesRef("foo")); terms.add(new BytesRef("bar")); - assertEquals(new TermInSetQuery("field", terms), ft.termsQuery(Arrays.asList("foo", "bar"), null)); + Query expected = new IndexOrDocValuesQuery( + new TermInSetQuery("field", terms), + new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, "field", terms) + ); + assertEquals(expected, ft.termsQuery(Arrays.asList("foo", "bar"), null)); - MappedFieldType unsearchable = new KeywordFieldType("field", false, true, Collections.emptyMap()); + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + Query expectedIndex = new TermInSetQuery("field", terms); + assertEquals(expectedIndex, onlyIndexed.termsQuery(Arrays.asList("foo", "bar"), null)); + + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Query expectedDocValues = new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, "field", terms); + assertEquals(expectedDocValues, onlyDocValues.termsQuery(Arrays.asList("foo", "bar"), null)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> unsearchable.termsQuery(Arrays.asList("foo", "bar"), null) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); } public void testExistsQuery() { @@ -157,9 +176,36 @@ public void testExistsQuery() { public void testRangeQuery() { MappedFieldType ft = new KeywordFieldType("field"); + + Query indexExpected = new TermRangeQuery("field", BytesRefs.toBytesRef("foo"), BytesRefs.toBytesRef("bar"), true, false); + Query dvExpected = new TermRangeQuery( + "field", + BytesRefs.toBytesRef("foo"), + BytesRefs.toBytesRef("bar"), + true, + false, + MultiTermQuery.DOC_VALUES_REWRITE + ); + + Query expected = new IndexOrDocValuesQuery(indexExpected, dvExpected); + Query actual = ft.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC); + assertEquals(expected, actual); + + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(indexExpected, onlyIndexed.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC)); + + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + assertEquals(dvExpected, onlyDocValues.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> unsearchable.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC) + ); + assertEquals( - new TermRangeQuery("field", BytesRefs.toBytesRef("foo"), 
BytesRefs.toBytesRef("bar"), true, false), - ft.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC) + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() ); OpenSearchException ee = expectThrows( @@ -175,16 +221,37 @@ public void testRangeQuery() { public void testRegexpQuery() { MappedFieldType ft = new KeywordFieldType("field"); assertEquals( - new RegexpQuery(new Term("field", "foo.*")), + new IndexOrDocValuesQuery( + new RegexpQuery(new Term("field", "foo.*")), + new RegexpQuery(new Term("field", "foo.*"), 0, 0, RegexpQuery.DEFAULT_PROVIDER, 10, MultiTermQuery.DOC_VALUES_REWRITE) + ), ft.regexpQuery("foo.*", 0, 0, 10, MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC) ); - MappedFieldType unsearchable = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Query indexExpected = new RegexpQuery(new Term("field", "foo.*")); + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(indexExpected, onlyIndexed.regexpQuery("foo.*", 0, 0, 10, MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC)); + + Query dvExpected = new RegexpQuery( + new Term("field", "foo.*"), + 0, + 0, + RegexpQuery.DEFAULT_PROVIDER, + 10, + MultiTermQuery.DOC_VALUES_REWRITE + ); + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + assertEquals(dvExpected, onlyDocValues.regexpQuery("foo.*", 0, 0, 10, MultiTermQuery.DOC_VALUES_REWRITE, MOCK_QSC)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> unsearchable.regexpQuery("foo.*", 0, 0, 10, null, MOCK_QSC) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); OpenSearchException ee = expectThrows( OpenSearchException.class, @@ -200,12 +267,26 @@ public void testFuzzyQuery() { ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MOCK_QSC) ); - MappedFieldType unsearchable = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Query indexExpected = new FuzzyQuery(new Term("field", "foo"), 2, 1, 50, true); + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(indexExpected, onlyIndexed.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MOCK_QSC)); + + Query dvExpected = new FuzzyQuery(new Term("field", "foo"), 2, 1, 50, true, MultiTermQuery.DOC_VALUES_REWRITE); + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + assertEquals( + dvExpected, + onlyDocValues.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MultiTermQuery.DOC_VALUES_REWRITE, MOCK_QSC) + ); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> unsearchable.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MOCK_QSC) + () -> unsearchable.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MultiTermQuery.DOC_VALUES_REWRITE, MOCK_QSC) + ); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() ); - assertEquals("Cannot 
search on field [field] since it is not indexed.", e.getMessage()); OpenSearchException ee = expectThrows( OpenSearchException.class, @@ -214,6 +295,47 @@ public void testFuzzyQuery() { assertEquals("[fuzzy] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", ee.getMessage()); } + public void testWildCardQuery() { + MappedFieldType ft = new KeywordFieldType("field"); + Query expected = new IndexOrDocValuesQuery( + new WildcardQuery(new Term("field", new BytesRef("foo*"))), + new WildcardQuery( + new Term("field", new BytesRef("foo*")), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.DOC_VALUES_REWRITE + ) + ); + assertEquals(expected, ft.wildcardQuery("foo*", MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC)); + + Query indexExpected = new WildcardQuery(new Term("field", new BytesRef("foo*"))); + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(indexExpected, onlyIndexed.wildcardQuery("foo*", MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC)); + + Query dvExpected = new WildcardQuery( + new Term("field", new BytesRef("foo*")), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.DOC_VALUES_REWRITE + ); + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + assertEquals(dvExpected, onlyDocValues.wildcardQuery("foo*", MultiTermQuery.DOC_VALUES_REWRITE, MOCK_QSC)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> unsearchable.wildcardQuery("foo*", MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC) + ); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + + OpenSearchException ee = expectThrows( + OpenSearchException.class, + () -> ft.wildcardQuery("foo*", MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC_DISALLOW_EXPENSIVE) + ); + assertEquals("[wildcard] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", ee.getMessage()); + } + public void testNormalizeQueries() { MappedFieldType ft = new KeywordFieldType("field"); assertEquals(new TermQuery(new Term("field", new BytesRef("FOO"))), ft.termQuery("FOO", null)); diff --git a/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java index e1391393f44fa..39f5bb313fe9e 100644 --- a/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java @@ -304,10 +304,16 @@ public void testToQueryBooleanPrefixMultipleFields() throws IOException { } else if (disjunct instanceof PrefixQuery) { final PrefixQuery secondDisjunct = (PrefixQuery) disjunct; assertThat(secondDisjunct.getPrefix(), equalTo(new Term(KEYWORD_FIELD_NAME, "foo bar"))); + } else if (disjunct instanceof IndexOrDocValuesQuery) { + final IndexOrDocValuesQuery iodvqDisjunct = (IndexOrDocValuesQuery) disjunct; + assertThat(iodvqDisjunct.getIndexQuery().toString(), equalTo("mapped_string_2:foo bar*")); } else { throw new AssertionError(); } - assertThat(disjunct, either(instanceOf(BooleanQuery.class)).or(instanceOf(PrefixQuery.class))); + assertThat( + disjunct, + 
either(instanceOf(BooleanQuery.class)).or(instanceOf(PrefixQuery.class)).or(instanceOf(IndexOrDocValuesQuery.class)) + ); } } } From a2febe956defae25417a84bca3646efc2bafb7a5 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Tue, 31 Oct 2023 01:21:42 -0700 Subject: [PATCH 115/178] Fix SegRep bug where primary shards do not update tracked replica state post failover/relocation (#11017) Signed-off-by: Marc Handalian --- .../replication/SegmentReplicationIT.java | 57 +++++++++++++++++++ .../SegmentReplicationTargetService.java | 7 +++ .../SegmentReplicationTargetServiceTests.java | 3 +- 3 files changed, 66 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index f48df082a25dc..a2996d87a851b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -22,6 +22,7 @@ import org.apache.lucene.index.StandardDirectoryReader; import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; +import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; @@ -62,6 +63,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexModule; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationPressureService; import org.opensearch.index.SegmentReplicationShardStats; @@ -94,6 +96,7 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static java.util.Arrays.asList; @@ -1776,4 +1779,58 @@ public void testRealtimeTermVectorRequestsUnSuccessful() throws IOException { assertThat(response.getIndex(), equalTo(INDEX_NAME)); } + + public void testReplicaAlreadyAtCheckpoint() throws Exception { + final List nodes = new ArrayList<>(); + final String primaryNode = internalCluster().startDataOnlyNode(); + nodes.add(primaryNode); + final Settings settings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(); + createIndex(INDEX_NAME, settings); + ensureGreen(INDEX_NAME); + // start a replica node, initially will be empty with no shard assignment. + final String replicaNode = internalCluster().startDataOnlyNode(); + nodes.add(replicaNode); + final String replicaNode2 = internalCluster().startDataOnlyNode(); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)) + ); + ensureGreen(INDEX_NAME); + + // index a doc. 
+ client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", randomInt()).get(); + refresh(INDEX_NAME); + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + IndexShard replica_1 = getIndexShard(replicaNode, INDEX_NAME); + IndexShard replica_2 = getIndexShard(replicaNode2, INDEX_NAME); + // wait until a replica is promoted & finishes engine flip, we don't care which one + AtomicReference primary = new AtomicReference<>(); + assertBusy(() -> { + assertTrue("replica should be promoted as a primary", replica_1.routingEntry().primary() || replica_2.routingEntry().primary()); + primary.set(replica_1.routingEntry().primary() ? replica_1 : replica_2); + }); + + FlushRequest request = new FlushRequest(INDEX_NAME); + request.force(true); + primary.get().flush(request); + + assertBusy(() -> { + assertEquals( + replica_1.getLatestReplicationCheckpoint().getSegmentInfosVersion(), + replica_2.getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + }); + + assertBusy(() -> { + ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get(); + ReplicationStats replicationStats = clusterStatsResponse.getIndicesStats().getSegments().getReplicationStats(); + assertEquals(0L, replicationStats.maxBytesBehind); + assertEquals(0L, replicationStats.maxReplicationLag); + assertEquals(0L, replicationStats.totalBytesBehind); + }); + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index 46095adfe96b4..73da0482537ad 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -53,6 +53,7 @@ import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; +import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.opensearch.indices.replication.SegmentReplicationSourceService.Actions.UPDATE_VISIBLE_CHECKPOINT; /** @@ -282,6 +283,12 @@ public void onReplicationFailure( } } }); + } else if (replicaShard.isSegmentReplicationAllowed()) { + // if we didn't process the checkpoint because we are up to date, + // send our latest checkpoint to the primary to update tracking. + // replicationId is not used by the primary set to a default value. 
+ final long replicationId = NO_OPS_PERFORMED; + updateVisibleCheckpoint(replicationId, replicaShard); } } else { logger.trace( diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 7b02635525264..252f3975bab25 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -55,7 +55,7 @@ import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; -import static org.junit.Assert.assertEquals; +import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.atLeastOnce; @@ -247,6 +247,7 @@ public void testAlreadyOnNewCheckpoint() { SegmentReplicationTargetService spy = spy(sut); spy.onNewCheckpoint(replicaShard.getLatestReplicationCheckpoint(), replicaShard); verify(spy, times(0)).startReplication(any(), any(), any()); + verify(spy, times(1)).updateVisibleCheckpoint(NO_OPS_PERFORMED, replicaShard); } @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") From cb39d0057e7277e02a99e2cf5dabfc36855991c4 Mon Sep 17 00:00:00 2001 From: Anshu Agarwal Date: Tue, 31 Oct 2023 20:07:17 +0530 Subject: [PATCH 116/178] Fix weighted routing flaky test (#10955) Signed-off-by: Anshu Agarwal Co-authored-by: Anshu Agarwal --- .../opensearch/search/SearchWeightedRoutingIT.java | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java index 5207dab83f1d9..aa1fe695ecc12 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java @@ -501,8 +501,9 @@ public void testShardRoutingWithNetworkDisruption_FailOpenEnabled() throws Excep logger.info("--> creating network partition disruption"); final String clusterManagerNode1 = internalCluster().getClusterManagerName(); - Set nodesInOneSide = Stream.of(clusterManagerNode1, nodeMap.get("b").get(0)).collect(Collectors.toCollection(HashSet::new)); - Set nodesInOtherSide = Stream.of(nodeMap.get("a").get(0)).collect(Collectors.toCollection(HashSet::new)); + Set nodesInOneSide = Stream.of(nodeMap.get("a").get(0)).collect(Collectors.toCollection(HashSet::new)); + Set nodesInOtherSide = Stream.of(clusterManagerNode1, nodeMap.get("b").get(0), nodeMap.get("c").get(0)) + .collect(Collectors.toCollection(HashSet::new)); NetworkDisruption networkDisruption = new NetworkDisruption( new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInOtherSide), @@ -870,8 +871,7 @@ private void assertSearchInAZ(String az) { SearchStats.Stats searchStats = stat.getIndices().getSearch().getTotal(); if (stat.getNode().isDataNode()) { if (stat.getNode().getId().equals(dataNodeId)) { - Assert.assertTrue(searchStats.getFetchCount() > 0L); - Assert.assertTrue(searchStats.getQueryCount() > 0L); + Assert.assertTrue(searchStats.getFetchCount() > 0L || searchStats.getQueryCount() > 0L); } } } @@ -945,7 +945,6 @@ public void 
testSearchAggregationWithNetworkDisruption_FailOpenEnabled() throws } logger.info("--> network disruption is stopped"); - networkDisruption.stopDisrupting(); for (int i = 0; i < 50; i++) { try { @@ -962,6 +961,8 @@ public void testSearchAggregationWithNetworkDisruption_FailOpenEnabled() throws fail("search should not fail"); } } + networkDisruption.stopDisrupting(); + assertSearchInAZ("b"); assertSearchInAZ("c"); assertNoSearchInAZ("a"); From aa0fddb50d6150330718ac0af23b72bd1598ae4a Mon Sep 17 00:00:00 2001 From: Harsha Vamsi Kalluri Date: Tue, 31 Oct 2023 17:36:35 -0700 Subject: [PATCH 117/178] Fix changelog for multiterm perf PR (#11038) Signed-off-by: Harsha Vamsi Kalluri --- CHANGELOG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 046693421d07b..54438d28ad35f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,7 +63,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) - Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) - Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558)) -- Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) ### Deprecated @@ -125,6 +124,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add telemetry tracer/metric enable flag and integ test. ([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395)) - Add instrumentation for indexing in transport bulk action and transport shard bulk action. 
([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273)) - [BUG] Disable sort optimization for HALF_FLOAT ([#10999](https://github.com/opensearch-project/OpenSearch/pull/10999)) +- Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) ### Deprecated @@ -141,4 +141,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x \ No newline at end of file +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x From a512dae4ac84792018cc81d5eacc8c69deefda1a Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 1 Nov 2023 07:58:00 -0400 Subject: [PATCH 118/178] Add Java 11/17/21 matrix for precommit and assemble checks (#11035) Signed-off-by: Andriy Redko --- .github/workflows/assemble.yml | 5 +++-- .github/workflows/precommit.yml | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/assemble.yml b/.github/workflows/assemble.yml index 6a66ac5fb5609..87cecdf38c072 100644 --- a/.github/workflows/assemble.yml +++ b/.github/workflows/assemble.yml @@ -7,13 +7,14 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: + java: [ 11, 17, 21 ] os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v4 - - name: Set up JDK 11 + - name: Set up JDK ${{ matrix.java }} uses: actions/setup-java@v3 with: - java-version: 11 + java-version: ${{ matrix.java }} distribution: temurin - name: Setup docker (missing on MacOS) if: runner.os == 'macos' diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index b04f404b11c55..cd75eb47946a4 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -7,13 +7,14 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: + java: [ 11, 17, 21 ] os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v4 - - name: Set up JDK 11 + - name: Set up JDK ${{ matrix.java }} uses: actions/setup-java@v3 with: - java-version: 11 + java-version: ${{ matrix.java }} distribution: temurin cache: gradle - name: Run Gradle (precommit) From 8673fa937db405b8d614f8d4a02c0aa52587c037 Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Wed, 1 Nov 2023 21:12:35 +0530 Subject: [PATCH 119/178] Fix testDeleteAllAndListAllPits and testDeleteWhileSearch flaky tests (#10946) Signed-off-by: Bharathwaj G --- .../java/org/opensearch/client/PitIT.java | 21 +++++++++---------- .../search/pit/DeletePitMultiNodeIT.java | 6 +++++- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java index d5c1888e78b5d..b0990560b08ba 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -72,7 +71,7 @@ public void testCreateAndDeletePit() throws IOException { assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(createPitResponse.getId())); } - public void 
testDeleteAllAndListAllPits() throws IOException, InterruptedException { + public void testDeleteAllAndListAllPits() throws Exception { CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); CreatePitResponse pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); @@ -91,11 +90,9 @@ public void testDeleteAllAndListAllPits() throws IOException, InterruptedExcepti List pits = getAllPitResponse.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()); assertTrue(pits.contains(pitResponse.getId())); assertTrue(pits.contains(pitResponse1.getId())); - CountDownLatch countDownLatch = new CountDownLatch(1); ActionListener deletePitListener = new ActionListener<>() { @Override public void onResponse(DeletePitResponse response) { - countDownLatch.countDown(); for (DeletePitInfo deletePitInfo : response.getDeletePitResults()) { assertTrue(deletePitInfo.isSuccessful()); } @@ -103,19 +100,20 @@ public void onResponse(DeletePitResponse response) { @Override public void onFailure(Exception e) { - countDownLatch.countDown(); if (!(e instanceof OpenSearchStatusException)) { throw new AssertionError("Delete all failed"); } } }; final CreatePitResponse pitResponse3 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); - + assertTrue(pitResponse3.getId() != null); ActionListener getPitsListener = new ActionListener() { @Override public void onResponse(GetAllPitNodesResponse response) { List pits = response.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()); assertTrue(pits.contains(pitResponse3.getId())); + // delete all pits + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); } @Override @@ -126,11 +124,12 @@ public void onFailure(Exception e) { } }; highLevelClient().getAllPitsAsync(RequestOptions.DEFAULT, getPitsListener); - highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); + // validate no pits case - getAllPitResponse = highLevelClient().getAllPits(RequestOptions.DEFAULT); - assertTrue(getAllPitResponse.getPitInfos().size() == 0); - highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + assertBusy(() -> { + GetAllPitNodesResponse getAllPitResponse1 = highLevelClient().getAllPits(RequestOptions.DEFAULT); + assertTrue(getAllPitResponse1.getPitInfos().size() == 0); + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + }); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java index 4a178e7066846..8ae652082f653 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java @@ -309,7 +309,11 @@ public void testDeleteWhileSearch() throws Exception { private void verifySearchContextMissingException(ShardSearchFailure[] failures) { for (ShardSearchFailure failure : failures) { Throwable cause = ExceptionsHelper.unwrapCause(failure.getCause()); - assertTrue(failure.toString(), cause instanceof SearchContextMissingException); + if (failure.toString().contains("reader_context is already closed 
can't increment refCount current count")) { + // this is fine, expected search error when context is already deleted + } else { + assertTrue(failure.toString(), cause instanceof SearchContextMissingException); + } } } From c851b34735294a4eb6baf5c1254de1b130daa6c4 Mon Sep 17 00:00:00 2001 From: Henri Yandell <477715+hyandell@users.noreply.github.com> Date: Thu, 2 Nov 2023 16:38:30 -0700 Subject: [PATCH 120/178] Adding slf4j license header to LoggerMessageFormat.java (#11069) * Adding slf4j license header per #9879 Signed-off-by: Henri Yandell Signed-off-by: Henri Yandell <477715+hyandell@users.noreply.github.com> Signed-off-by: Peter Nied Co-authored-by: Peter Nied --- CHANGELOG.md | 1 + .../core/common/logging/LoggerMessageFormat.java | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 54438d28ad35f..9eb3486701641 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -100,6 +100,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814)) - Added cluster setting cluster.restrict.index.replication_type to restrict setting of index setting replication type ([#10866](https://github.com/opensearch-project/OpenSearch/pull/10866)) - Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) +- Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069)) ### Dependencies - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) diff --git a/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java b/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java index cd75bddd680e5..c7b9bee3cbf4d 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java +++ b/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java @@ -30,6 +30,13 @@ * GitHub history for details. */ +/* + * This code is based on code from SFL4J 1.5.11 + * Copyright (c) 2004-2007 QOS.ch + * All rights reserved. + * SPDX-License-Identifier: MIT + */ + package org.opensearch.core.common.logging; import java.util.HashSet; From 38999b2f8c796d1575275b09644a9e99f5145fbb Mon Sep 17 00:00:00 2001 From: Poojita Raj Date: Thu, 2 Nov 2023 16:53:29 -0700 Subject: [PATCH 121/178] =?UTF-8?q?Revert=20"Add=20cluster=20setting=20clu?= =?UTF-8?q?ster.restrict.index.replication=5Ftype=20t=E2=80=A6=20(#10866)"?= =?UTF-8?q?=20(#11072)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 8b2173910f754a48773b3283e1a511cbc1a9db78. 
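For orientation, the revert below removes a node-scoped, final boolean cluster setting and the index-creation validation that consulted it. Reconstructed from the deleted lines in this diff (nothing in this sketch survives the revert), the removed guard looked roughly like this:

    // Removed from IndicesService: a final, node-scope setting, so it could only be
    // set at node startup and never updated dynamically.
    public static final Setting<Boolean> CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING = Setting.boolSetting(
        "cluster.restrict.index.replication_type",
        false,
        Property.NodeScope,
        Property.Final
    );

    // Removed from MetadataCreateIndexService: reject an explicit index.replication.type
    // index setting at creation time whenever the cluster-level restriction flag is on.
    private static Optional<String> validateIndexReplicationTypeSettings(Settings requestSettings, ClusterSettings clusterSettings) {
        if (requestSettings.hasValue(SETTING_REPLICATION_TYPE)
            && clusterSettings.get(IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING)) {
            return Optional.of(
                "index setting [index.replication.type] is not allowed to be set as ["
                    + IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey()
                    + "=true]"
            );
        }
        return Optional.empty();
    }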
Signed-off-by: Poojita Raj --- CHANGELOG.md | 1 - .../SegmentReplicationClusterSettingIT.java | 27 ------------------- .../metadata/MetadataCreateIndexService.java | 19 ------------- .../common/settings/ClusterSettings.java | 3 +-- .../opensearch/indices/IndicesService.java | 11 -------- .../MetadataCreateIndexServiceTests.java | 16 ++--------- 6 files changed, 3 insertions(+), 74 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9eb3486701641..2753249c9d956 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -98,7 +98,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) - Update the indexRandom function to create more segments for concurrent search tests ([10247](https://github.com/opensearch-project/OpenSearch/pull/10247)) - [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814)) -- Added cluster setting cluster.restrict.index.replication_type to restrict setting of index setting replication type ([#10866](https://github.com/opensearch-project/OpenSearch/pull/10866)) - Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) - Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069)) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java index 186a5ce39f131..a82fd8d845709 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java @@ -19,7 +19,6 @@ import org.opensearch.test.OpenSearchIntegTestCase; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; -import static org.opensearch.indices.IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -124,30 +123,4 @@ public void testIndexReplicationSettingOverridesDocRepClusterSetting() throws Ex assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabled(), false); } - public void testIndexReplicationTypeWhenRestrictSettingTrue() { - testRestrictIndexReplicationTypeSetting(true, randomFrom(ReplicationType.values())); - } - - public void testIndexReplicationTypeWhenRestrictSettingFalse() { - testRestrictIndexReplicationTypeSetting(false, randomFrom(ReplicationType.values())); - } - - private void testRestrictIndexReplicationTypeSetting(boolean setRestrict, ReplicationType replicationType) { - String expectedExceptionMsg = - "Validation Failed: 1: index setting [index.replication.type] is not allowed to be set as [cluster.restrict.index.replication_type=true];"; - String clusterManagerName = internalCluster().startNode( - Settings.builder().put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), setRestrict).build() - ); - 
internalCluster().startDataOnlyNodes(1); - - // Test create index fails - Settings indexSettings = Settings.builder().put(indexSettings()).put(SETTING_REPLICATION_TYPE, replicationType).build(); - if (setRestrict) { - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, indexSettings)); - assertEquals(expectedExceptionMsg, exception.getMessage()); - } else { - createIndex(INDEX_NAME, indexSettings); - } - } - } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 78a22fe11f072..8d76a39712ee3 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1252,7 +1252,6 @@ List getIndexSettingsValidationErrors( if (forbidPrivateIndexSettings) { validationErrors.addAll(validatePrivateSettingsNotExplicitlySet(settings, indexScopedSettings)); } - validateIndexReplicationTypeSettings(settings, clusterService.getClusterSettings()).ifPresent(validationErrors::add); if (indexName.isEmpty() || indexName.get().charAt(0) != '.') { // Apply aware replica balance validation only to non system indices int replicaCount = settings.getAsInt( @@ -1307,24 +1306,6 @@ private static List validateIndexCustomPath(Settings settings, @Nullable return validationErrors; } - /** - * Validates {@code index.replication.type} is not set if {@code cluster.restrict.index.replication_type} is set to true. - * - * @param requestSettings settings passed in during index create request - * @param clusterSettings cluster setting - */ - private static Optional validateIndexReplicationTypeSettings(Settings requestSettings, ClusterSettings clusterSettings) { - if (requestSettings.hasValue(SETTING_REPLICATION_TYPE) - && clusterSettings.get(IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING)) { - return Optional.of( - "index setting [index.replication.type] is not allowed to be set as [" - + IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey() - + "=true]" - ); - } - return Optional.empty(); - } - /** * Validates the settings and mappings for shrinking an index. 
* diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 3a1fff21db366..5ab1f49949679 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -691,8 +691,7 @@ public void apply(Settings value, Settings current, Settings previous) { AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, CPUBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT, - CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, - IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING + CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT ) ) ); diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 36abc77893d81..50c551c2be29b 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -299,17 +299,6 @@ public class IndicesService extends AbstractLifecycleComponent Property.Final ); - /** - * This setting is used to restrict creation of index where the 'index.replication.type' index setting is set. - * If disabled, the replication type can be specified. - */ - public static final Setting CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING = Setting.boolSetting( - "cluster.restrict.index.replication_type", - false, - Property.NodeScope, - Property.Final - ); - /** * The node's settings. */ diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index cace66d8c6d9e..e40826915c848 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -139,7 +139,6 @@ import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.ShardLimitValidatorTests.createTestShardLimitService; import static org.opensearch.node.Node.NODE_ATTRIBUTES; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; @@ -1178,8 +1177,6 @@ public void testvalidateIndexSettings() { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a, b") .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) - .put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), true) - .put(SETTING_REPLICATION_TYPE, randomFrom(ReplicationType.values())) .build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); when(clusterService.getSettings()).thenReturn(settings); @@ 
-1203,12 +1200,8 @@ public void testvalidateIndexSettings() { ); List validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); - assertThat(validationErrors.size(), is(2)); - assertThat( - validationErrors.get(0), - is("index setting [index.replication.type] is not allowed to be set as [cluster.restrict.index.replication_type=true]") - ); - assertThat(validationErrors.get(1), is("expected total copies needs to be a multiple of total awareness attributes [3]")); + assertThat(validationErrors.size(), is(1)); + assertThat(validationErrors.get(0), is("expected total copies needs to be a multiple of total awareness attributes [3]")); settings = Settings.builder() .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone, rack") @@ -1216,13 +1209,8 @@ public void testvalidateIndexSettings() { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) .put(SETTING_NUMBER_OF_REPLICAS, 2) - .put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), false) - .put(SETTING_REPLICATION_TYPE, randomFrom(ReplicationType.values())) .build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - when(clusterService.getClusterSettings()).thenReturn(clusterSettings); - validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); assertThat(validationErrors.size(), is(0)); From 54fa0508130280c8f64c9f674b8104844d86af91 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Thu, 2 Nov 2023 18:55:59 -0700 Subject: [PATCH 122/178] Refactor SegmentReplicationTargetService to only hold completed target state instead of the entire target. 
(#11043) Signed-off-by: Marc Handalian --- .../replication/SegmentReplicationTargetService.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index 73da0482537ad..cb738d74000bc 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -70,7 +70,7 @@ public class SegmentReplicationTargetService implements IndexEventListener { private final ReplicationCollection onGoingReplications; - private final Map completedReplications = ConcurrentCollections.newConcurrentMap(); + private final Map completedReplications = ConcurrentCollections.newConcurrentMap(); private final SegmentReplicationSourceFactory sourceFactory; @@ -192,7 +192,7 @@ public SegmentReplicationState getOngoingEventSegmentReplicationState(ShardId sh */ @Nullable public SegmentReplicationState getlatestCompletedEventSegmentReplicationState(ShardId shardId) { - return Optional.ofNullable(completedReplications.get(shardId)).map(SegmentReplicationTarget::state).orElse(null); + return completedReplications.get(shardId); } /** @@ -525,7 +525,7 @@ public void onResponse(Void o) { logger.debug(() -> new ParameterizedMessage("Finished replicating {} marking as done.", target.description())); onGoingReplications.markAsDone(replicationId); if (target.state().getIndex().recoveredFileCount() != 0 && target.state().getIndex().recoveredBytes() != 0) { - completedReplications.put(target.shardId(), target); + completedReplications.put(target.shardId(), target.state()); } } From 1130d656a64a3c8e7476c5776b9e6c9efe937255 Mon Sep 17 00:00:00 2001 From: Jay Deng Date: Fri, 3 Nov 2023 18:47:30 -0700 Subject: [PATCH 123/178] Disable concurrent aggs for Diversified Sampler and Sampler aggs (#11087) Signed-off-by: Jay Deng --- CHANGELOG.md | 1 + .../bucket/DiversifiedSamplerIT.java | 9 +++----- .../search/aggregations/bucket/SamplerIT.java | 22 +++++++++++++++++-- .../sampler/DiversifiedAggregatorFactory.java | 2 +- .../sampler/SamplerAggregatorFactory.java | 2 +- 5 files changed, 26 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2753249c9d956..0b8f1084eafd4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -125,6 +125,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add instrumentation for indexing in transport bulk action and transport shard bulk action. 
([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273)) - [BUG] Disable sort optimization for HALF_FLOAT ([#10999](https://github.com/opensearch-project/OpenSearch/pull/10999)) - Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) +- Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087)) ### Deprecated diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java index 865dd670fbf68..1d5f7f93e7410 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java @@ -33,9 +33,9 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.opensearch.action.admin.indices.refresh.RefreshRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; +import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.TermQueryBuilder; @@ -132,13 +132,14 @@ public void setupSuiteScopeCluster() throws Exception { client().prepareIndex("test") .setId("" + i) .setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); client().prepareIndex("idx_unmapped_author") .setId("" + i) .setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); } - client().admin().indices().refresh(new RefreshRequest("test")).get(); } public void testIssue10719() throws Exception { @@ -221,10 +222,6 @@ public void testNestedDiversity() throws Exception { } public void testNestedSamples() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10046", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); // Test samples nested under samples int MAX_DOCS_PER_AUTHOR = 1; int MAX_DOCS_PER_GENRE = 2; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java index 7033c42c5d661..c7b03d21cb6bb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java @@ -34,9 +34,9 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.opensearch.action.admin.indices.refresh.RefreshRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; +import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.TermQueryBuilder; @@ -132,13 +132,14 @@ public void setupSuiteScopeCluster() throws Exception { client().prepareIndex("test") .setId("" + i) 
.setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); client().prepareIndex("idx_unmapped_author") .setId("" + i) .setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); } - client().admin().indices().refresh(new RefreshRequest("test")).get(); } public void testIssue10719() throws Exception { @@ -195,6 +196,23 @@ public void testSimpleSampler() throws Exception { assertThat(maxBooksPerAuthor, equalTo(3L)); } + public void testSimpleSamplerShardSize() throws Exception { + final int SHARD_SIZE = randomIntBetween(1, 3); + SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(SHARD_SIZE); + sampleAgg.subAggregation(terms("authors").field("author")); + SearchResponse response = client().prepareSearch("test") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(new TermQueryBuilder("genre", "fantasy")) + .setFrom(0) + .setSize(60) + .addAggregation(sampleAgg) + .get(); + assertSearchResponse(response); + Sampler sample = response.getAggregations().get("sample"); + Terms authors = sample.getAggregations().get("authors"); + assertEquals(SHARD_SIZE * NUM_SHARDS, sample.getDocCount()); + } + public void testUnmappedChildAggNoDiversity() throws Exception { SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100); sampleAgg.subAggregation(terms("authors").field("author")); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java index 5f81c76b69385..0f3c9872353c1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java @@ -162,6 +162,6 @@ public InternalAggregation buildEmptyAggregation() { @Override protected boolean supportsConcurrentSegmentSearch() { - return true; + return false; } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java index d3db8a66ee21f..51d9830d3cea0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java @@ -75,6 +75,6 @@ public Aggregator createInternal( @Override protected boolean supportsConcurrentSegmentSearch() { - return true; + return false; } } From 747f7d1550c5d9ffe9951902d89a15ed60cdf39f Mon Sep 17 00:00:00 2001 From: Ticheng Lin <51488860+ticheng-aws@users.noreply.github.com> Date: Sat, 4 Nov 2023 20:47:20 -0700 Subject: [PATCH 124/178] Fixing concurrent search tests with one slice (#11071) * Fixing concurrent search tests with one slice (#11071) Signed-off-by: Ticheng Lin * Remove changes for non-flaky tests (#11071) Signed-off-by: Ticheng Lin --------- Signed-off-by: Ticheng Lin --- .../search/nested/SimpleNestedIT.java | 8 ++++++++ .../opensearch/search/pit/PitMultiNodeIT.java | 1 + .../search/preference/SearchPreferenceIT.java | 10 ++++++---- .../opensearch/search/query/QueryStringIT.java | 6 ++++++ .../search/query/ScriptScoreQueryIT.java | 12 ++++++++---- 
.../search/query/SimpleQueryStringIT.java | 17 ++++++++++++++++- 6 files changed, 45 insertions(+), 9 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java index 83dec7b27a897..656e7b2e366ed 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java @@ -293,6 +293,7 @@ public void testMultiNested() throws Exception { refresh(); // check the numDocs assertDocumentCount("test", 7); + indexRandomForConcurrentSearch("test"); // do some multi nested queries SearchResponse searchResponse = client().prepareSearch("test") @@ -485,6 +486,7 @@ public void testExplain() throws Exception { ) .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total)) @@ -968,6 +970,10 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { // https://github.com/elastic/elasticsearch/issues/31554 public void testLeakingSortValues() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11065", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 1)) .setMapping( @@ -1035,6 +1041,7 @@ public void testLeakingSortValues() throws Exception { .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(termQuery("_id", 2)) @@ -1627,6 +1634,7 @@ public void testCheckFixedBitSetCache() throws Exception { client().prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); ensureSearchable("test"); + indexRandomForConcurrentSearch("test"); // No nested mapping yet, there shouldn't be anything in the fixed bit set cache ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java index e42f12709c948..a3432bfe7e3e4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java @@ -100,6 +100,7 @@ public void clearIndex() { public void testPit() throws Exception { CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); + indexRandomForConcurrentSearch("index"); ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse pitResponse = execute.get(); SearchResponse searchResponse = client().prepareSearch("index") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java index 425764b1c88d2..97fe05f5b9747 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java @@ -52,7 +52,6 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -99,7 +98,7 @@ public Settings nodeSettings(int nodeOrdinal) { } // see #2896 - public void testStopOneNodePreferenceWithRedState() throws IOException { + public void testStopOneNodePreferenceWithRedState() throws Exception { assertAcked( prepareCreate("test").setSettings( Settings.builder().put("index.number_of_shards", cluster().numDataNodes() + 2).put("index.number_of_replicas", 0) @@ -110,6 +109,7 @@ public void testStopOneNodePreferenceWithRedState() throws IOException { client().prepareIndex("test").setId("" + i).setSource("field1", "value1").get(); } refresh(); + indexRandomForConcurrentSearch("test"); internalCluster().stopRandomDataNode(); client().admin().cluster().prepareHealth().setWaitForStatus(ClusterHealthStatus.RED).get(); String[] preferences = new String[] { @@ -138,7 +138,7 @@ public void testStopOneNodePreferenceWithRedState() throws IOException { assertThat("_only_local", searchResponse.getFailedShards(), greaterThanOrEqualTo(0)); } - public void testNoPreferenceRandom() { + public void testNoPreferenceRandom() throws Exception { assertAcked( prepareCreate("test").setSettings( // this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data @@ -149,6 +149,7 @@ public void testNoPreferenceRandom() { client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); final Client client = internalCluster().smartClient(); SearchResponse searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).get(); @@ -201,7 +202,7 @@ public void testThatSpecifyingNonExistingNodesReturnsUsefulError() { } } - public void testNodesOnlyRandom() { + public void testNodesOnlyRandom() throws Exception { assertAcked( prepareCreate("test").setSettings( // this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data @@ -211,6 +212,7 @@ public void testNodesOnlyRandom() { ensureGreen(); client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); final Client client = internalCluster().smartClient(); // multiple wildchar to cover multi-param usecase diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java index 099eb934f4f4d..1ca5859f23bca 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java @@ -186,6 +186,7 @@ public void testDocWithAllTypes() throws Exception { String docBody = copyToStringFromClasspath("/org/opensearch/search/query/all-example-document.json"); reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, MediaTypeRegistry.JSON)); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get(); assertHits(resp.getHits(), "1"); @@ -225,6 +226,7 @@ public void testKeywordWithWhitespace() throws Exception { 
reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar")); reqs.add(client().prepareIndex("test").setId("3").setSource("f1", "foo bar")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get(); assertHits(resp.getHits(), "3"); @@ -245,6 +247,7 @@ public void testRegexCaseInsensitivity() throws Exception { indexRequests.add(client().prepareIndex("messages").setId("1").setSource("message", "message: this is a TLS handshake")); indexRequests.add(client().prepareIndex("messages").setId("2").setSource("message", "message: this is a tcp handshake")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("messages"); SearchResponse response = client().prepareSearch("messages").setQuery(queryStringQuery("/TLS/").defaultField("message")).get(); assertNoFailures(response); @@ -282,6 +285,7 @@ public void testAllFields() throws Exception { List reqs = new ArrayList<>(); reqs.add(client().prepareIndex("test_1").setId("1").setSource("f1", "foo", "f2", "eggplant")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test_1"); SearchResponse resp = client().prepareSearch("test_1") .setQuery(queryStringQuery("foo eggplant").defaultOperator(Operator.AND)) @@ -374,6 +378,7 @@ public void testLimitOnExpandedFields() throws Exception { client().prepareIndex("testindex").setId("1").setSource("field_A0", "foo bar baz").get(); refresh(); + indexRandomForConcurrentSearch("testindex"); // single field shouldn't trigger the limit doAssertOneHitForQueryString("field_A0:foo"); @@ -465,6 +470,7 @@ public void testFieldAliasOnDisallowedFieldType() throws Exception { List indexRequests = new ArrayList<>(); indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); // The wildcard field matches aliases for both a text and geo_point field. // By default, the geo_point field should be ignored when building the query. 
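The change repeated across every test file in this patch is the same one-line fix: after the test data is indexed and refreshed, the test calls indexRandomForConcurrentSearch(index), which creates additional segments in the index (per the CHANGELOG entry for #10247) so the concurrent segment search path is actually exercised before any assertions run. A minimal sketch of the pattern, assuming a hypothetical test built on the same OpenSearchIntegTestCase helpers these diffs already use:

    public void testQueryWithConcurrentSearch() throws Exception {
        assertAcked(prepareCreate("test").setMapping("f1", "type=text"));
        client().prepareIndex("test").setId("1").setSource("f1", "foo").get();
        refresh("test");
        // the line each flaky test gains in this patch: force extra segments so the
        // concurrent search path behaves deterministically even with a single slice
        indexRandomForConcurrentSearch("test");
        SearchResponse resp = client().prepareSearch("test").setQuery(termQuery("f1", "foo")).get();
        assertHitCount(resp, 1L);
    }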
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java index 7ba582811bbc2..55029712a061c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java @@ -109,13 +109,14 @@ protected Map, Object>> pluginScripts() { // 1) only matched docs retrieved // 2) score is calculated based on a script with params // 3) min score applied - public void testScriptScore() { + public void testScriptScore() throws Exception { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); Map params = new HashMap<>(); params.put("param1", 0.1); @@ -135,13 +136,14 @@ public void testScriptScore() { assertOrderedSearchHits(resp, "10", "8", "6"); } - public void testScriptScoreBoolQuery() { + public void testScriptScoreBoolQuery() throws Exception { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + i, "field2", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); Map params = new HashMap<>(); params.put("param1", 0.1); @@ -155,7 +157,7 @@ public void testScriptScoreBoolQuery() { } // test that when the internal query is rewritten script_score works well - public void testRewrittenQuery() { + public void testRewrittenQuery() throws Exception { assertAcked( prepareCreate("test-index2").setSettings(Settings.builder().put("index.number_of_shards", 1)) .setMapping("field1", "type=date", "field2", "type=double") @@ -164,6 +166,7 @@ public void testRewrittenQuery() { client().prepareIndex("test-index2").setId("2").setSource("field1", "2019-10-01", "field2", 2).get(); client().prepareIndex("test-index2").setId("3").setSource("field1", "2019-11-01", "field2", 3).get(); refresh(); + indexRandomForConcurrentSearch("test-index2"); RangeQueryBuilder rangeQB = new RangeQueryBuilder("field1").from("2019-01-01"); // the query should be rewritten to from:null Map params = new HashMap<>(); @@ -174,7 +177,7 @@ public void testRewrittenQuery() { assertOrderedSearchHits(resp, "3", "2", "1"); } - public void testDisallowExpensiveQueries() { + public void testDisallowExpensiveQueries() throws Exception { try { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; @@ -182,6 +185,7 @@ public void testDisallowExpensiveQueries() { client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); Map params = new HashMap<>(); params.put("param1", 0.1); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java index 384d2b7423e66..017d28ef3a2a6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java @@ -150,6 +150,7 @@ public void testSimpleQueryString() throws ExecutionException, InterruptedExcept client().prepareIndex("test").setId("5").setSource("body", "quux baz spaghetti"), client().prepareIndex("test").setId("6").setSource("otherbody", "spaghetti") ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar")).get(); assertHitCount(searchResponse, 3L); @@ -199,6 +200,7 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception { client().prepareIndex("test").setId("3").setSource("body", "foo bar"), client().prepareIndex("test").setId("4").setSource("body", "foo baz bar") ); + indexRandomForConcurrentSearch("test"); logger.info("--> query 1"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar").minimumShouldMatch("2")).get(); @@ -235,6 +237,7 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception { client().prepareIndex("test").setId("7").setSource("body2", "foo bar", "other", "foo"), client().prepareIndex("test").setId("8").setSource("body2", "foo baz bar", "other", "foo") ); + indexRandomForConcurrentSearch("test"); logger.info("--> query 5"); searchResponse = client().prepareSearch() @@ -256,7 +259,7 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception { assertSearchHits(searchResponse, "6", "7", "8"); } - public void testNestedFieldSimpleQueryString() throws IOException { + public void testNestedFieldSimpleQueryString() throws Exception { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -275,6 +278,7 @@ public void testNestedFieldSimpleQueryString() throws IOException { ); client().prepareIndex("test").setId("1").setSource("body", "foo bar baz").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar baz").field("body")).get(); assertHitCount(searchResponse, 1L); @@ -359,6 +363,8 @@ public void testSimpleQueryStringLenient() throws ExecutionException, Interrupte client().prepareIndex("test2").setId("10").setSource("field", 5) ); refresh(); + indexRandomForConcurrentSearch("test1"); + indexRandomForConcurrentSearch("test2"); SearchResponse searchResponse = client().prepareSearch() .setAllowPartialSearchResults(true) @@ -419,6 +425,7 @@ public void testSimpleQueryStringUsesFieldAnalyzer() throws Exception { client().prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("123").field("foo").field("bar")).get(); assertHitCount(searchResponse, 1L); @@ -469,6 +476,7 @@ public void testBasicAllQuery() throws Exception { reqs.add(client().prepareIndex("test").setId("2").setSource("f2", "Bar")); reqs.add(client().prepareIndex("test").setId("3").setSource("f3", "foo bar baz")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); assertHitCount(resp, 2L); @@ -492,6 +500,7 @@ public void testWithDate() throws Exception { reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02")); reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", 
"2015/09/01")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get(); assertHits(resp.getHits(), "1", "2"); @@ -523,6 +532,7 @@ public void testWithLotsOfTypes() throws Exception { client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01", "f_float", "1.8", "f_ip", "127.0.0.2") ); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get(); assertHits(resp.getHits(), "1", "2"); @@ -550,6 +560,7 @@ public void testDocWithAllTypes() throws Exception { String docBody = copyToStringFromClasspath("/org/opensearch/search/query/all-example-document.json"); reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, MediaTypeRegistry.JSON)); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); assertHits(resp.getHits(), "1"); @@ -596,6 +607,7 @@ public void testKeywordWithWhitespace() throws Exception { reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar")); reqs.add(client().prepareIndex("test").setId("3").setSource("f1", "foo bar")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); assertHits(resp.getHits(), "3"); @@ -663,6 +675,7 @@ public void testFieldAlias() throws Exception { indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_alias")).get(); @@ -681,6 +694,7 @@ public void testFieldAliasWithWildcardField() throws Exception { indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_*")).get(); @@ -697,6 +711,7 @@ public void testFieldAliasOnDisallowedFieldType() throws Exception { List indexRequests = new ArrayList<>(); indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); // The wildcard field matches aliases for both a text and boolean field. // By default, the boolean field should be ignored when building the query. From c1962cc7c6a9389cc31d28befd65298ac4f003ad Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Sun, 5 Nov 2023 20:47:43 -0800 Subject: [PATCH 125/178] Fix flaky pit/scroll tests in SegmentReplicationIT (#10770) This change adds an assertBusy to wait until files are cleared from disk as it is not synchronous with the scroll/pit removal. 
Signed-off-by: Marc Handalian --- .../replication/SegmentReplicationIT.java | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index a2996d87a851b..9c93a8f85db8e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -1066,9 +1066,14 @@ public void testScrollCreatedOnReplica() throws Exception { client(replica).prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); - currentFiles = List.of(replicaShard.store().directory().listAll()); - assertFalse("Files should be cleaned up post scroll clear request", currentFiles.containsAll(snapshottedSegments)); + assertBusy( + () -> assertFalse( + "Files should be cleaned up post scroll clear request", + List.of(replicaShard.store().directory().listAll()).containsAll(snapshottedSegments) + ) + ); assertEquals(100, scrollHits); + } /** @@ -1327,9 +1332,12 @@ public void testPitCreatedOnReplica() throws Exception { // delete the PIT DeletePitRequest deletePITRequest = new DeletePitRequest(pitResponse.getId()); client().execute(DeletePitAction.INSTANCE, deletePITRequest).actionGet(); - - currentFiles = List.of(replicaShard.store().directory().listAll()); - assertFalse("Files should be cleaned up", currentFiles.containsAll(snapshottedSegments)); + assertBusy( + () -> assertFalse( + "Files should be cleaned up", + List.of(replicaShard.store().directory().listAll()).containsAll(snapshottedSegments) + ) + ); } /** From 1782b906a0a9bf293bbc8e7f33058153790827dc Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 6 Nov 2023 12:21:05 -0500 Subject: [PATCH 126/178] [Streaming Indexing] Introduce new experimental HTTP transport based on Netty 4 and Project Reactor (Reactor Netty) (#9672) * [Streaming Indexing] Introduce new experimental server HTTP transport based on Netty 4 and Project Reactor (Reactor Netty) Signed-off-by: Andriy Redko * Remove HttpChunk for now since it is not used in non-streaming APIs Signed-off-by: Andriy Redko * Fix postmerge issues Signed-off-by: Andriy Redko --------- Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 4 + .../netty4/Netty4HttpServerTransport.java | 14 + .../licenses/reactive-streams-1.0.4.jar.sha1 | 1 - .../licenses/reactive-streams-LICENSE.txt | 21 - plugins/repository-azure/build.gradle | 22 +- .../licenses/reactive-streams-1.0.4.jar.sha1 | 1 - .../licenses/reactive-streams-LICENSE.txt | 21 - .../licenses/reactor-core-3.5.6.jar.sha1 | 1 - .../licenses/reactor-netty-1.1.8.jar.sha1 | 1 - .../reactor-netty-core-1.1.12.jar.sha1 | 1 + .../reactor-netty-core-1.1.8.jar.sha1 | 1 - .../reactor-netty-http-1.1.12.jar.sha1 | 1 + .../reactor-netty-http-1.1.9.jar.sha1 | 1 - plugins/repository-s3/build.gradle | 1 - .../licenses/reactive-streams-1.0.4.jar.sha1 | 1 - .../licenses/reactive-streams-LICENSE.txt | 21 - .../licenses/reactive-streams-NOTICE.txt | 0 plugins/transport-reactor-netty4/build.gradle | 264 ++++++++ .../licenses/netty-LICENSE.txt | 202 ++++++ .../licenses/netty-NOTICE.txt | 116 ++++ .../netty-buffer-4.1.100.Final.jar.sha1 | 1 + .../netty-codec-4.1.100.Final.jar.sha1 | 1 + .../netty-codec-dns-4.1.100.Final.jar.sha1 | 1 + .../netty-codec-http-4.1.100.Final.jar.sha1 | 1 + 
.../netty-codec-http2-4.1.100.Final.jar.sha1 | 1 + .../netty-common-4.1.100.Final.jar.sha1 | 1 + .../netty-handler-4.1.100.Final.jar.sha1 | 1 + .../netty-resolver-4.1.100.Final.jar.sha1 | 1 + .../netty-resolver-dns-4.1.100.Final.jar.sha1 | 1 + .../netty-transport-4.1.100.Final.jar.sha1 | 1 + ...-native-unix-common-4.1.100.Final.jar.sha1 | 1 + .../licenses/reactor-LICENSE.txt | 201 ++++++ .../licenses/reactor-NOTICE.txt} | 0 .../reactor-netty-core-1.1.12.jar.sha1 | 1 + .../reactor-netty-http-1.1.12.jar.sha1 | 1 + .../OpenSearchReactorNetty4IntegTestCase.java | 73 +++ .../ReactorNetty4HttpRequestSizeLimitIT.java | 159 +++++ .../netty4/ReactorNetty4PipeliningIT.java | 68 ++ .../reactor/netty4/HttpConversionUtil.java | 47 ++ .../netty4/NonStreamingHttpChannel.java | 76 +++ .../netty4/NonStreamingRequestConsumer.java | 104 ++++ .../netty4/ReactorNetty4HttpRequest.java | 272 ++++++++ .../netty4/ReactorNetty4HttpResponse.java | 42 ++ .../ReactorNetty4HttpServerChannel.java | 53 ++ .../ReactorNetty4HttpServerTransport.java | 313 ++++++++++ .../http/reactor/netty4/package-info.java | 12 + .../reactor/ReactorNetty4Plugin.java | 109 ++++ .../transport/reactor/SharedGroupFactory.java | 164 +++++ .../transport/reactor/netty4/Netty4Utils.java | 142 +++++ .../netty4/ReactorNetty4Transport.java | 35 ++ .../reactor/netty4/package-info.java | 12 + .../transport/reactor/package-info.java | 12 + .../plugin-metadata/plugin-security.policy | 24 + .../reactor/netty4/ReactorHttpClient.java | 208 +++++++ .../netty4/ReactorNetty4BadRequestTests.java | 122 ++++ ...ReactorNetty4HttpServerTransportTests.java | 579 ++++++++++++++++++ server/build.gradle | 8 +- .../licenses/reactive-streams-1.0.4.jar.sha1 | 0 .../licenses/reactive-streams-LICENSE.txt | 0 .../licenses/reactive-streams-NOTICE.txt | 0 server/licenses/reactor-LICENSE.txt | 201 ++++++ .../licenses/reactor-NOTICE.txt | 0 server/licenses/reactor-core-3.5.11.jar.sha1 | 1 + .../common/settings/ClusterSettings.java | 1 + .../http/HttpTransportSettings.java | 8 + .../opensearch/test/OpenSearchTestCase.java | 3 + 67 files changed, 3668 insertions(+), 90 deletions(-) delete mode 100644 plugins/discovery-ec2/licenses/reactive-streams-1.0.4.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/reactive-streams-LICENSE.txt delete mode 100644 plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/reactive-streams-LICENSE.txt delete mode 100644 plugins/repository-azure/licenses/reactor-core-3.5.6.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.1.9.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/reactive-streams-1.0.4.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/reactive-streams-LICENSE.txt delete mode 100644 plugins/repository-s3/licenses/reactive-streams-NOTICE.txt create mode 100644 plugins/transport-reactor-netty4/build.gradle create mode 100644 plugins/transport-reactor-netty4/licenses/netty-LICENSE.txt create mode 100644 plugins/transport-reactor-netty4/licenses/netty-NOTICE.txt create mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 create mode 100644 
plugins/transport-reactor-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-LICENSE.txt rename plugins/{crypto-kms/licenses/reactive-streams-NOTICE.txt => transport-reactor-netty4/licenses/reactor-NOTICE.txt} (100%) create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchReactorNetty4IntegTestCase.java create mode 100644 plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequestSizeLimitIT.java create mode 100644 plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4PipeliningIT.java create mode 100644 plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/HttpConversionUtil.java create mode 100644 plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java create mode 100644 plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java create mode 100644 plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java create mode 100644 plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpResponse.java create mode 100644 plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerChannel.java create mode 100644 plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java create mode 100644 plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/package-info.java create mode 100644 plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java create mode 100644 plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java create mode 100644 plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/Netty4Utils.java create mode 100644 plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/ReactorNetty4Transport.java create mode 100644 plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/package-info.java create mode 100644 
plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/package-info.java create mode 100644 plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy create mode 100644 plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java create mode 100644 plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4BadRequestTests.java create mode 100644 plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportTests.java rename {plugins/crypto-kms => server}/licenses/reactive-streams-1.0.4.jar.sha1 (100%) rename {plugins/crypto-kms => server}/licenses/reactive-streams-LICENSE.txt (100%) rename {plugins/discovery-ec2 => server}/licenses/reactive-streams-NOTICE.txt (100%) create mode 100644 server/licenses/reactor-LICENSE.txt rename plugins/repository-azure/licenses/reactive-streams-NOTICE.txt => server/licenses/reactor-NOTICE.txt (100%) create mode 100644 server/licenses/reactor-core-3.5.11.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b8f1084eafd4..a71d2ff537834 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -100,6 +100,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814)) - Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) - Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069)) +- [Streaming Indexing] Introduce new experimental server HTTP transport based on Netty 4 and Project Reactor (Reactor Netty) ([#9672](https://github.com/opensearch-project/OpenSearch/pull/9672)) ### Dependencies - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 0d98cba35448f..f19437979c852 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -29,6 +29,10 @@ jna = 5.13.0 netty = 4.1.100.Final joda = 2.12.2 +# project reactor +reactor_netty = 1.1.12 +reactor = 3.5.11 + # client dependencies httpclient5 = 5.2.1 httpcore5 = 5.2.2 diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index 1677f333a4b1c..4970c42163ac3 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -116,6 +116,9 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE; import static org.opensearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; +/** + * The HTTP transport implementations based on Netty 4. 
+ */ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { private static final Logger logger = LogManager.getLogger(Netty4HttpServerTransport.class); @@ -184,6 +187,17 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { private volatile ServerBootstrap serverBootstrap; private volatile SharedGroupFactory.SharedGroup sharedGroup; + /** + * Creates new HTTP transport implementations based on Netty 4 + * @param settings settings + * @param networkService network service + * @param bigArrays big array allocator + * @param threadPool thread pool instance + * @param xContentRegistry XContent registry instance + * @param dispatcher dispatcher instance + * @param clusterSettings cluster settings + * @param sharedGroupFactory shared group factory + */ public Netty4HttpServerTransport( Settings settings, NetworkService networkService, diff --git a/plugins/discovery-ec2/licenses/reactive-streams-1.0.4.jar.sha1 b/plugins/discovery-ec2/licenses/reactive-streams-1.0.4.jar.sha1 deleted file mode 100644 index 45a80e3f7e361..0000000000000 --- a/plugins/discovery-ec2/licenses/reactive-streams-1.0.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/reactive-streams-LICENSE.txt b/plugins/discovery-ec2/licenses/reactive-streams-LICENSE.txt deleted file mode 100644 index 1e3c7e7c77495..0000000000000 --- a/plugins/discovery-ec2/licenses/reactive-streams-LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT No Attribution - -Copyright 2014 Reactive Streams - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE.
\ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 26e2b4813b8a5..51f2057b4bedb 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -56,11 +56,8 @@ dependencies { api "io.netty:netty-transport-native-unix-common:${versions.netty}" implementation project(':modules:transport-netty4') api 'com.azure:azure-storage-blob:12.23.0' - api 'org.reactivestreams:reactive-streams:1.0.4' - api 'io.projectreactor:reactor-core:3.5.6' - api 'io.projectreactor.netty:reactor-netty:1.1.8' - api 'io.projectreactor.netty:reactor-netty-core:1.1.8' - api 'io.projectreactor.netty:reactor-netty-http:1.1.9' + api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}" + api "io.projectreactor.netty:reactor-netty-http:${versions.reactor_netty}" api "org.slf4j:slf4j-api:${versions.slf4j}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" @@ -101,10 +98,6 @@ thirdPartyAudit { 'com.azure.storage.internal.avro.implementation.schema.AvroSchema', 'com.ctc.wstx.shaded.msv_core.driver.textui.Driver', 'io.micrometer.context.ContextAccessor', - 'io.micrometer.context.ContextRegistry', - 'io.micrometer.context.ContextSnapshot', - 'io.micrometer.context.ContextSnapshot$Scope', - 'io.micrometer.core.instrument.Clock', 'io.micrometer.core.instrument.Counter', 'io.micrometer.core.instrument.Counter$Builder', 'io.micrometer.core.instrument.DistributionSummary', @@ -114,14 +107,10 @@ thirdPartyAudit { 'io.micrometer.core.instrument.Meter', 'io.micrometer.core.instrument.MeterRegistry', 'io.micrometer.core.instrument.Metrics', - 'io.micrometer.core.instrument.Tag', - 'io.micrometer.core.instrument.Tags', 'io.micrometer.core.instrument.Timer', 'io.micrometer.core.instrument.Timer$Builder', 'io.micrometer.core.instrument.Timer$Sample', - 'io.micrometer.core.instrument.binder.jvm.ExecutorServiceMetrics', 'io.micrometer.core.instrument.composite.CompositeMeterRegistry', - 'io.micrometer.core.instrument.search.Search', 'io.netty.channel.epoll.Epoll', 'io.netty.channel.epoll.EpollDatagramChannel', 'io.netty.channel.epoll.EpollServerSocketChannel', @@ -168,9 +157,6 @@ thirdPartyAudit { 'org.slf4j.impl.StaticLoggerBinder', 'org.slf4j.impl.StaticMDCBinder', 'org.slf4j.impl.StaticMarkerBinder', - 'reactor.blockhound.BlockHound$Builder', - 'reactor.blockhound.integration.BlockHoundIntegration', - 'io.micrometer.context.ThreadLocalAccessor', 'io.micrometer.common.KeyValue', 'io.micrometer.common.KeyValues', 'io.micrometer.common.docs.KeyName', @@ -190,6 +176,7 @@ thirdPartyAudit { 'io.micrometer.tracing.handler.PropagatingSenderTracingObservationHandler', 'io.micrometer.tracing.propagation.Propagator', 'io.micrometer.core.instrument.observation.MeterObservationHandler', + 'io.micrometer.core.instrument.Tags', 'io.micrometer.observation.ObservationHandler', 'io.micrometer.observation.ObservationRegistry', 'io.micrometer.observation.ObservationRegistry$ObservationConfig', @@ -210,8 +197,7 @@ thirdPartyAudit { 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', - 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', - 
'reactor.core.publisher.Traces$SharedSecretsCallSiteSupplierFactory$TracingException' + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1' ) } diff --git a/plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 b/plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 deleted file mode 100644 index 45a80e3f7e361..0000000000000 --- a/plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactive-streams-LICENSE.txt b/plugins/repository-azure/licenses/reactive-streams-LICENSE.txt deleted file mode 100644 index 1e3c7e7c77495..0000000000000 --- a/plugins/repository-azure/licenses/reactive-streams-LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT No Attribution - -Copyright 2014 Reactive Streams - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-core-3.5.6.jar.sha1 b/plugins/repository-azure/licenses/reactor-core-3.5.6.jar.sha1 deleted file mode 100644 index ad9b7263e7b38..0000000000000 --- a/plugins/repository-azure/licenses/reactor-core-3.5.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -027fdc551537b349389176a23a192f11a7a3d7de \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 deleted file mode 100644 index 6b6bf1903b16c..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d53a9d7d0395285f4c81664494fcd61477626e32 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 new file mode 100644 index 0000000000000..352d69396d0c9 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 @@ -0,0 +1 @@ +378dc5a375e6440099e837b22cf4b01341cbe4ea \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 deleted file mode 100644 index 707631f4dfe0c..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -48999c4ae27cdcee5eaff9dfd150a8b64624f0f5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 new file mode 100644 index 0000000000000..1bcb0e0c52950 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 @@ -0,0 +1 @@ +e839fadb8f45d8a7a2783466faedd03373366c23 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.9.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.9.jar.sha1 deleted file mode 100644 index 96deead2c75d1..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.9.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -408b3037133f2e8ab0f195ccd3f807026be9b860 \ No newline at end of file diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 44fd45b265e82..560d12d14395d 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -70,7 +70,6 @@ dependencies { api "software.amazon.awssdk:sts:${versions.aws}" api "software.amazon.awssdk:netty-nio-client:${versions.aws}" - api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" api "org.apache.httpcomponents:httpclient:${versions.httpclient}" api "org.apache.httpcomponents:httpcore:${versions.httpcore}" api "commons-logging:commons-logging:${versions.commonslogging}" diff --git a/plugins/repository-s3/licenses/reactive-streams-1.0.4.jar.sha1 b/plugins/repository-s3/licenses/reactive-streams-1.0.4.jar.sha1 deleted file mode 100644 index 45a80e3f7e361..0000000000000 --- a/plugins/repository-s3/licenses/reactive-streams-1.0.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/reactive-streams-LICENSE.txt b/plugins/repository-s3/licenses/reactive-streams-LICENSE.txt deleted file mode 100644 index 1e3c7e7c77495..0000000000000 --- a/plugins/repository-s3/licenses/reactive-streams-LICENSE.txt +++ /dev/null 
@@ -1,21 +0,0 @@ -MIT No Attribution - -Copyright 2014 Reactive Streams - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/plugins/repository-s3/licenses/reactive-streams-NOTICE.txt b/plugins/repository-s3/licenses/reactive-streams-NOTICE.txt deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/plugins/transport-reactor-netty4/build.gradle b/plugins/transport-reactor-netty4/build.gradle new file mode 100644 index 0000000000000..7d7eb330b4a55 --- /dev/null +++ b/plugins/transport-reactor-netty4/build.gradle @@ -0,0 +1,264 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +import org.opensearch.gradle.info.BuildParams +import org.opensearch.gradle.test.RestIntegTestTask +import org.opensearch.gradle.test.TestTask +import org.opensearch.gradle.test.rest.JavaRestTestPlugin +import org.opensearch.gradle.test.InternalClusterTestPlugin + +apply plugin: 'opensearch.yaml-rest-test' +apply plugin: 'opensearch.java-rest-test' +apply plugin: 'opensearch.internal-cluster-test' + +// The transport-reactor-netty4 plugin is published to maven +apply plugin: 'opensearch.publish' + +opensearchplugin { + description 'Reactor Netty 4 based transport implementation' + classname 'org.opensearch.transport.reactor.ReactorNetty4Plugin' + hasClientJar = true +} + +dependencies { + // network stack + api "io.netty:netty-buffer:${versions.netty}" + api "io.netty:netty-codec:${versions.netty}" + api "io.netty:netty-codec-dns:${versions.netty}" + api "io.netty:netty-codec-http:${versions.netty}" + api "io.netty:netty-codec-http2:${versions.netty}" + api "io.netty:netty-common:${versions.netty}" + api "io.netty:netty-handler:${versions.netty}" + api "io.netty:netty-resolver-dns:${versions.netty}" + api "io.netty:netty-resolver:${versions.netty}" + api "io.netty:netty-transport:${versions.netty}" + api "io.netty:netty-transport-native-unix-common:${versions.netty}" + + api "io.projectreactor.netty:reactor-netty-http:${versions.reactor_netty}" + api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}" + + testImplementation "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" + testImplementation "io.projectreactor:reactor-test:${versions.reactor}" + testImplementation project(":modules:transport-netty4") +} + +restResources { + restApi { + includeCore '_common', 'cluster', 'nodes' + } +} + +tasks.named("dependencyLicenses").configure { + mapping from: /netty-.*/, to: 'netty' + mapping from: /reactor-.*/, to: 'reactor' +} + +// TODO: Remove that once we have a complete test suite +testingConventions.enabled = false + +test { + /* + * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each + * other if we allow them to set the number of available processors as it's set-once in Netty. 
+ */ + systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' +} + +internalClusterTest { + systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' +} + +javaRestTest { + systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' +} + +thirdPartyAudit { + ignoreMissingClasses( + 'com.aayushatharva.brotli4j.Brotli4jLoader', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', + 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', + 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', + 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', + // classes are missing + + // from io.netty.logging.CommonsLoggerFactory (netty) + 'org.apache.commons.logging.Log', + 'org.apache.commons.logging.LogFactory', + + // from Log4j (deliberate, Netty will fallback to Log4j 2) + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + + // from io.netty.handler.ssl.OpenSslEngine (netty) + 'io.netty.internal.tcnative.Buffer', + 'io.netty.internal.tcnative.CertificateCompressionAlgo', + 'io.netty.internal.tcnative.Library', + 'io.netty.internal.tcnative.SSL', + 'io.netty.internal.tcnative.SSLContext', + 'io.netty.internal.tcnative.SSLPrivateKeyMethod', + + // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) + 'org.bouncycastle.cert.X509v3CertificateBuilder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', + 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', + + // from io.netty.handler.ssl.JettyNpnSslEngine (netty) + 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', + 'org.eclipse.jetty.npn.NextProtoNego$ServerProvider', + 'org.eclipse.jetty.npn.NextProtoNego', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteInput (netty) + 'org.jboss.marshalling.ByteInput', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty) + 'org.jboss.marshalling.ByteOutput', + + // from io.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty) + 'org.jboss.marshalling.Marshaller', + + // from io.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty) + 'org.jboss.marshalling.MarshallerFactory', + 'org.jboss.marshalling.MarshallingConfiguration', + 'org.jboss.marshalling.Unmarshaller', + + // from io.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional + 'org.slf4j.helpers.FormattingTuple', + 'org.slf4j.helpers.MessageFormatter', + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory', + 'org.slf4j.spi.LocationAwareLogger', + + 'com.google.protobuf.nano.CodedOutputByteBufferNano', + 'com.google.protobuf.nano.MessageNano', + 'com.ning.compress.BufferRecycler', + 'com.ning.compress.lzf.ChunkDecoder', + 'com.ning.compress.lzf.ChunkEncoder', + 'com.ning.compress.lzf.LZFChunk', + 'com.ning.compress.lzf.LZFEncoder', + 'com.ning.compress.lzf.util.ChunkDecoderFactory', + 'com.ning.compress.lzf.util.ChunkEncoderFactory', + 'lzma.sdk.lzma.Encoder', + 'net.jpountz.lz4.LZ4Compressor', + 'net.jpountz.lz4.LZ4Factory', + 'net.jpountz.lz4.LZ4FastDecompressor', + 'net.jpountz.xxhash.XXHash32', + 'net.jpountz.xxhash.XXHashFactory', + 
'io.netty.internal.tcnative.AsyncSSLPrivateKeyMethod', + 'io.netty.internal.tcnative.AsyncTask', + 'io.netty.internal.tcnative.CertificateCallback', + 'io.netty.internal.tcnative.CertificateVerifier', + 'io.netty.internal.tcnative.ResultCallback', + 'io.netty.internal.tcnative.SessionTicketKey', + 'io.netty.internal.tcnative.SniHostNameMatcher', + 'io.netty.internal.tcnative.SSL', + 'io.netty.internal.tcnative.SSLSession', + 'io.netty.internal.tcnative.SSLSessionCache', + 'io.netty.channel.epoll.Epoll', + 'io.netty.channel.epoll.EpollDatagramChannel', + 'io.netty.channel.epoll.EpollServerSocketChannel', + 'io.netty.channel.epoll.EpollSocketChannel', + 'io.netty.channel.kqueue.KQueue', + 'io.netty.channel.kqueue.KQueueDatagramChannel', + 'io.netty.channel.kqueue.KQueueServerSocketChannel', + 'io.netty.channel.kqueue.KQueueSocketChannel', + 'io.netty.handler.codec.haproxy.HAProxyMessage', + 'io.netty.handler.codec.haproxy.HAProxyMessageDecoder', + 'io.netty.handler.proxy.ProxyHandler', + 'io.netty.incubator.channel.uring.IOUring', + 'io.netty.incubator.channel.uring.IOUringDatagramChannel', + 'io.netty.incubator.channel.uring.IOUringServerSocketChannel', + 'io.netty.incubator.channel.uring.IOUringSocketChannel', + + 'org.eclipse.jetty.alpn.ALPN$ClientProvider', + 'org.eclipse.jetty.alpn.ALPN$ServerProvider', + 'org.eclipse.jetty.alpn.ALPN', + + 'org.conscrypt.AllocatedBuffer', + 'org.conscrypt.BufferAllocator', + 'org.conscrypt.Conscrypt', + 'org.conscrypt.HandshakeListener', + + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration', + + 'io.micrometer.common.KeyValue', + 'io.micrometer.common.KeyValues', + 'io.micrometer.common.docs.KeyName', + 'io.micrometer.context.ContextAccessor', + 'io.micrometer.core.instrument.Counter', + 'io.micrometer.core.instrument.Counter$Builder', + 'io.micrometer.core.instrument.DistributionSummary', + 'io.micrometer.core.instrument.DistributionSummary$Builder', + 'io.micrometer.core.instrument.Gauge', + 'io.micrometer.core.instrument.Gauge$Builder', + 'io.micrometer.core.instrument.Meter', + 'io.micrometer.core.instrument.Meter$Type', + 'io.micrometer.core.instrument.MeterRegistry', + 'io.micrometer.core.instrument.Metrics', + 'io.micrometer.core.instrument.Tags', + 'io.micrometer.core.instrument.Timer', + 'io.micrometer.core.instrument.Timer$Builder', + 'io.micrometer.core.instrument.Timer$Sample', + 'io.micrometer.core.instrument.composite.CompositeMeterRegistry', + 'io.micrometer.core.instrument.docs.MeterDocumentation', + 'io.micrometer.core.instrument.observation.MeterObservationHandler', + 'io.micrometer.observation.Observation', + 'io.micrometer.observation.Observation$Context', + 'io.micrometer.observation.ObservationHandler', + 'io.micrometer.observation.ObservationRegistry', + 'io.micrometer.observation.ObservationRegistry$ObservationConfig', + 'io.micrometer.observation.docs.ObservationDocumentation', + 'io.micrometer.observation.transport.ReceiverContext', + 'io.micrometer.observation.transport.RequestReplyReceiverContext', + 'io.micrometer.observation.transport.RequestReplySenderContext', + 'io.micrometer.observation.transport.SenderContext', + 'io.micrometer.tracing.Span', + 'io.micrometer.tracing.Tracer', + 'io.micrometer.tracing.docs.SpanDocumentation', + 'io.micrometer.tracing.handler.DefaultTracingObservationHandler', + 'io.micrometer.tracing.handler.PropagatingReceiverTracingObservationHandler', + 'io.micrometer.tracing.handler.PropagatingSenderTracingObservationHandler', + 
'io.micrometer.tracing.propagation.Propagator' + ) + + ignoreViolations( + 'io.netty.util.internal.PlatformDependent0', + 'io.netty.util.internal.PlatformDependent0$1', + 'io.netty.util.internal.PlatformDependent0$2', + 'io.netty.util.internal.PlatformDependent0$3', + 'io.netty.util.internal.PlatformDependent0$4', + 'io.netty.util.internal.PlatformDependent0$6', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$3', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$4', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$5' + ) +} diff --git a/plugins/transport-reactor-netty4/licenses/netty-LICENSE.txt b/plugins/transport-reactor-netty4/licenses/netty-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/transport-reactor-netty4/licenses/netty-NOTICE.txt b/plugins/transport-reactor-netty4/licenses/netty-NOTICE.txt new file mode 100644 index 0000000000000..5bbf91a14de23 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-NOTICE.txt @@ -0,0 +1,116 @@ + + The Netty Project + ================= + +Please visit the Netty web site for more information: + + * http://netty.io/ + +Copyright 2011 The Netty Project + +The Netty Project licenses this file to you under the Apache License, +version 2.0 (the "License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +License for the specific language governing permissions and limitations +under the License. 
+ +Also, please refer to each LICENSE.<component>.txt file, which is located in +the 'license' directory of the distribution file, for the license terms of the +components that this product depends on. + +------------------------------------------------------------------------------- +This product contains the extensions to Java Collections Framework which has +been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene: + + * LICENSE: + * license/LICENSE.jsr166y.txt (Public Domain) + * HOMEPAGE: + * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ + * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ + +This product contains a modified version of Robert Harder's Public Domain +Base64 Encoder and Decoder, which can be obtained at: + + * LICENSE: + * license/LICENSE.base64.txt (Public Domain) + * HOMEPAGE: + * http://iharder.sourceforge.net/current/java/base64/ + +This product contains a modified version of 'JZlib', a re-implementation of +zlib in pure Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.jzlib.txt (BSD Style License) + * HOMEPAGE: + * http://www.jcraft.com/jzlib/ + +This product contains a modified version of 'Webbit', a Java event based +WebSocket and HTTP server: + + * LICENSE: + * license/LICENSE.webbit.txt (BSD License) + * HOMEPAGE: + * https://github.com/joewalnes/webbit + +This product optionally depends on 'Protocol Buffers', Google's data +interchange format, which can be obtained at: + + * LICENSE: + * license/LICENSE.protobuf.txt (New BSD License) + * HOMEPAGE: + * http://code.google.com/p/protobuf/ + +This product optionally depends on 'Bouncy Castle Crypto APIs' to generate +a temporary self-signed X.509 certificate when the JVM does not provide the +equivalent functionality.
It can be obtained at: + + * LICENSE: + * license/LICENSE.bouncycastle.txt (MIT License) + * HOMEPAGE: + * http://www.bouncycastle.org/ + +This product optionally depends on 'SLF4J', a simple logging facade for Java, +which can be obtained at: + + * LICENSE: + * license/LICENSE.slf4j.txt (MIT License) + * HOMEPAGE: + * http://www.slf4j.org/ + +This product optionally depends on 'Apache Commons Logging', a logging +framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-logging.txt (Apache License 2.0) + * HOMEPAGE: + * http://commons.apache.org/logging/ + +This product optionally depends on 'Apache Log4J', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.log4j.txt (Apache License 2.0) + * HOMEPAGE: + * http://logging.apache.org/log4j/ + +This product optionally depends on 'JBoss Logging', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.jboss-logging.txt (GNU LGPL 2.1) + * HOMEPAGE: + * http://anonsvn.jboss.org/repos/common/common-logging-spi/ + +This product optionally depends on 'Apache Felix', an open source OSGi +framework implementation, which can be obtained at: + + * LICENSE: + * license/LICENSE.felix.txt (Apache License 2.0) + * HOMEPAGE: + * http://felix.apache.org/ diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..aaf2e35302d77 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..a77333ea8ae47 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..dfa4a0fbea94c --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +663b1b7bf3ff0f12fde4df20c72d9e94584ebffa \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..6f26bf4e6a9b5 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..bf5605151406e --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +cbf1a430ea44dbdedbcde16b185cbb95f28d72c7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 new file mode 
100644 index 0000000000000..d2ff72db60d1f --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +847f942381145de23f21c836d05b0677474271d3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..f12a6046e96d0 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +4c0acdb8bb73647ebb3847ac2d503d53d72c02b4 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..8e4179ba15942 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +fe62f9ccd41b8660d07639dbbab8ae1edd6f2720 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..0948daa05fff6 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +62dbdf5f25eda75ea8456be1ed72b3fcb0d18774 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..ab2819da570fd --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +6620fbfb47667a5eb6050e35c7b4c88000bcd77f \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..30d7758302e37 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +648ff5571022dbfa6789122e3872477bbf67fa7b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-LICENSE.txt b/plugins/transport-reactor-netty4/licenses/reactor-LICENSE.txt new file mode 100644 index 0000000000000..e5583c184e67a --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/crypto-kms/licenses/reactive-streams-NOTICE.txt b/plugins/transport-reactor-netty4/licenses/reactor-NOTICE.txt similarity index 100% rename from plugins/crypto-kms/licenses/reactive-streams-NOTICE.txt rename to plugins/transport-reactor-netty4/licenses/reactor-NOTICE.txt diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 new file mode 100644 index 0000000000000..352d69396d0c9 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 @@ -0,0 +1 @@ +378dc5a375e6440099e837b22cf4b01341cbe4ea \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 new file mode 100644 index 0000000000000..1bcb0e0c52950 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 @@ -0,0 +1 @@ +e839fadb8f45d8a7a2783466faedd03373366c23 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchReactorNetty4IntegTestCase.java b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchReactorNetty4IntegTestCase.java new file mode 100644 index 0000000000000..abbd50bf1b235 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchReactorNetty4IntegTestCase.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch; + +import org.opensearch.common.network.NetworkModule; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.transport.Netty4ModulePlugin; +import org.opensearch.transport.reactor.ReactorNetty4Plugin; +import org.opensearch.transport.reactor.netty4.ReactorNetty4Transport; + +import java.util.Collection; +import java.util.List; + +public abstract class OpenSearchReactorNetty4IntegTestCase extends OpenSearchIntegTestCase { + + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + @Override + protected boolean addMockTransportService() { + return false; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); + // randomize netty settings + if (randomBoolean()) { + builder.put(ReactorNetty4Transport.SETTING_WORKER_COUNT.getKey(), random().nextInt(3) + 1); + } + builder.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4ModulePlugin.NETTY_TRANSPORT_NAME); + builder.put(NetworkModule.HTTP_TYPE_KEY, ReactorNetty4Plugin.REACTOR_NETTY_HTTP_TRANSPORT_NAME); + return builder.build(); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return List.of(ReactorNetty4Plugin.class, Netty4ModulePlugin.class); + } +} diff --git a/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequestSizeLimitIT.java b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequestSizeLimitIT.java new file mode 100644 index 0000000000000..833d60375a2bd --- /dev/null +++ b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequestSizeLimitIT.java @@ -0,0 +1,159 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.OpenSearchReactorNetty4IntegTestCase; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.util.ReferenceCounted; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; + +/** + * This test checks that in-flight requests are limited on HTTP level and that requests that are excluded from limiting can pass. + * + * As the same setting is also used to limit in-flight requests on transport level, we avoid transport messages by forcing + * a single node "cluster". + */ +@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numClientNodes = 0, numDataNodes = 1) +public class ReactorNetty4HttpRequestSizeLimitIT extends OpenSearchReactorNetty4IntegTestCase { + + private static final ByteSizeValue LIMIT = new ByteSizeValue(2, ByteSizeUnit.KB); + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), LIMIT) + .build(); + } + + public void testLimitsInFlightRequests() throws Exception { + ensureGreen(); + + // we use the limit size as a (very) rough indication of how many requests we should send to hit the limit + int numRequests = LIMIT.bytesAsInt() / 100; + + StringBuilder bulkRequest = new StringBuilder(); + for (int i = 0; i < numRequests; i++) { + bulkRequest.append("{\"index\": {}}"); + bulkRequest.append(System.lineSeparator()); + bulkRequest.append("{ \"field\" : \"value\" }"); + bulkRequest.append(System.lineSeparator()); + } + + List<Tuple<String, CharSequence>> requests = new ArrayList<>(); + for (int i = 0; i < 150; i++) { + requests.add(Tuple.tuple("/index/_bulk", bulkRequest)); + } + + HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); + TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); + + try (ReactorHttpClient nettyHttpClient = ReactorHttpClient.create(false)) { + final Collection<FullHttpResponse> singleResponse = nettyHttpClient.post(transportAddress.address(), requests.subList(0, 1)); + try { + assertThat(singleResponse, hasSize(1)); + assertAtLeastOnceExpectedStatus(singleResponse, HttpResponseStatus.OK); + + final Collection<FullHttpResponse> multipleResponses = nettyHttpClient.post(transportAddress.address(), requests); + try { + assertThat(multipleResponses, hasSize(requests.size())); + assertAtLeastOnceExpectedStatus(multipleResponses, HttpResponseStatus.TOO_MANY_REQUESTS); + } finally { + multipleResponses.forEach(ReferenceCounted::release); + } + } finally { + singleResponse.forEach(ReferenceCounted::release); + } + } + }
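+ + // With LIMIT = 2KB, the loop above packs roughly 2048 / 100 = 20 bulk actions into each request + // body, so the 150 copies sent at once comfortably exceed the in-flight breaker. + + // The cluster settings endpoint exercised below is expected to be excluded from in-flight + // limiting (its REST handler opts out of circuit breaking), so all of its requests should succeed.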
+ + public void testDoesNotLimitExcludedRequests() throws Exception { + ensureGreen(); + + List<Tuple<String, CharSequence>> requestUris = new ArrayList<>(); + for (int i = 0; i < 1500; i++) { + requestUris.add(Tuple.tuple("/_cluster/settings", "{ \"transient\": {\"search.default_search_timeout\": \"40s\" } }")); + } + + HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); + TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); + + try (ReactorHttpClient nettyHttpClient = ReactorHttpClient.create(false)) { + final Collection<FullHttpResponse> responses = nettyHttpClient.put(transportAddress.address(), requestUris); + try { + assertThat(responses, hasSize(requestUris.size())); + assertAllInExpectedStatus(responses, HttpResponseStatus.OK); + } finally { + responses.forEach(ReferenceCounted::release); + } + } + } + + private void assertAtLeastOnceExpectedStatus(Collection<FullHttpResponse> responses, HttpResponseStatus expectedStatus) { + long countExpectedStatus = responses.stream().filter(r -> r.status().equals(expectedStatus)).count(); + assertThat("Expected at least one request with status [" + expectedStatus + "]", countExpectedStatus, greaterThan(0L)); + } + + private void assertAllInExpectedStatus(Collection<FullHttpResponse> responses, HttpResponseStatus expectedStatus) { + long countUnexpectedStatus = responses.stream().filter(r -> r.status().equals(expectedStatus) == false).count(); + assertThat( + "Expected all requests with status [" + expectedStatus + "] but [" + countUnexpectedStatus + "] requests had a different one", + countUnexpectedStatus, + equalTo(0L) + ); + } + +} diff --git a/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4PipeliningIT.java b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4PipeliningIT.java new file mode 100644 index 0000000000000..c0e43de06f6ff --- /dev/null +++ b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4PipeliningIT.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.OpenSearchReactorNetty4IntegTestCase; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; + +import java.util.Collection; +import java.util.Locale; + +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.util.ReferenceCounted; + +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; + +@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) +public class ReactorNetty4PipeliningIT extends OpenSearchReactorNetty4IntegTestCase { + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + + public void testThatNettyHttpServerSupportsPipelining() throws Exception { + String[] requests = new String[] { "/", "/_nodes/stats", "/", "/_cluster/state", "/" }; + + HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); + TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); + TransportAddress transportAddress = randomFrom(boundAddresses); + + try (ReactorHttpClient client = ReactorHttpClient.create()) { + Collection<FullHttpResponse> responses = client.get(transportAddress.address(), true, requests); + try { + assertThat(responses, hasSize(5)); + + Collection<String> opaqueIds = ReactorHttpClient.returnOpaqueIds(responses); + assertOpaqueIdsInOrder(opaqueIds); + } finally { + responses.forEach(ReferenceCounted::release); + } + } + } + + private void assertOpaqueIdsInOrder(Collection<String> opaqueIds) { + // check if opaque ids are monotonically increasing + int i = 0; + String msg = String.format(Locale.ROOT, "Expected list of opaque ids to be monotonically increasing, got [%s]", opaqueIds); + for (String opaqueId : opaqueIds) { + assertThat(msg, opaqueId, is(String.valueOf(i++))); + } + } + +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/HttpConversionUtil.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/HttpConversionUtil.java new file mode 100644 index 0000000000000..bd75227dabd08 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/HttpConversionUtil.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.rest.RestRequest; + +import io.netty.handler.codec.http.HttpMethod; + +final class HttpConversionUtil { + private HttpConversionUtil() {} + + /** + * Converts {@link HttpMethod} to {@link RestRequest.Method} + * @param method {@link HttpMethod} method + * @return corresponding {@link RestRequest.Method} + * @throws IllegalArgumentException if HTTP method is not supported + */ + public static RestRequest.Method convertMethod(HttpMethod method) { + if (method == HttpMethod.GET) { + return RestRequest.Method.GET; + } else if (method == HttpMethod.POST) { + return RestRequest.Method.POST; + } else if (method == HttpMethod.PUT) { + return RestRequest.Method.PUT; + } else if (method == HttpMethod.DELETE) { + return RestRequest.Method.DELETE; + } else if (method == HttpMethod.HEAD) { + return RestRequest.Method.HEAD; + } else if (method == HttpMethod.OPTIONS) { + return RestRequest.Method.OPTIONS; + } else if (method == HttpMethod.PATCH) { + return RestRequest.Method.PATCH; + } else if (method == HttpMethod.TRACE) { + return RestRequest.Method.TRACE; + } else if (method == HttpMethod.CONNECT) { + return RestRequest.Method.CONNECT; + } else { + throw new IllegalArgumentException("Unexpected http method: " + method); + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java new file mode 100644 index 0000000000000..98b359319ff1b --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.common.concurrent.CompletableContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.http.HttpChannel; +import org.opensearch.http.HttpResponse; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import java.net.InetSocketAddress; +import java.util.concurrent.atomic.AtomicBoolean; + +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import reactor.core.publisher.FluxSink; +import reactor.netty.http.server.HttpServerRequest; +import reactor.netty.http.server.HttpServerResponse; + +class NonStreamingHttpChannel implements HttpChannel { + private final HttpServerRequest request; + private final HttpServerResponse response; + private final CompletableContext<Void> closeContext = new CompletableContext<>(); + private final FluxSink<HttpContent> emitter; + + NonStreamingHttpChannel(HttpServerRequest request, HttpServerResponse response, FluxSink<HttpContent> emitter) { + this.request = request; + this.response = response; + this.emitter = emitter; + this.request.withConnection(connection -> Netty4Utils.addListener(connection.channel().closeFuture(), closeContext)); + } + + @Override + public boolean isOpen() { + final AtomicBoolean isOpen = new AtomicBoolean(); + request.withConnection(connection -> isOpen.set(connection.channel().isOpen())); + return isOpen.get(); + } + + @Override + public void close() { + request.withConnection(connection -> connection.channel().close()); + } + + @Override + public void addCloseListener(ActionListener<Void> listener) { + closeContext.addListener(ActionListener.toBiConsumer(listener)); + } + + @Override + public void sendResponse(HttpResponse response, ActionListener<Void> listener) { + emitter.next(createResponse(response)); + listener.onResponse(null); + emitter.complete(); + } + + @Override + public InetSocketAddress getRemoteAddress() { + return (InetSocketAddress) response.remoteAddress(); + } + + @Override + public InetSocketAddress getLocalAddress() { + return (InetSocketAddress) response.hostAddress(); + } + + FullHttpResponse createResponse(HttpResponse response) { + return (FullHttpResponse) response; + } +}
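A note on the channel above: sendResponse() emits exactly one FullHttpResponse into the per-request FluxSink and completes it immediately, so every HTTP exchange is modelled as a one-element reactive stream. A minimal, self-contained sketch of that pattern (illustrative only; plain Reactor types, the OneShotDemo class and its names are hypothetical, not part of this patch):

    import reactor.core.publisher.Flux;

    public class OneShotDemo {
        public static void main(String[] args) {
            // One HTTP exchange == one element pushed through a FluxSink, then completion:
            Flux<String> oneShot = Flux.create(sink -> {
                sink.next("response");   // mirrors emitter.next(createResponse(response))
                sink.complete();         // mirrors emitter.complete()
            });
            oneShot.subscribe(System.out::println);   // prints "response" exactly once
        }
    }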
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java new file mode 100644 index 0000000000000..d43e23e800e65 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.http.AbstractHttpServerTransport; +import org.opensearch.http.HttpRequest; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; + +import io.netty.buffer.CompositeByteBuf; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.LastHttpContent; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import reactor.core.Disposable; +import reactor.core.publisher.Flux; +import reactor.core.publisher.FluxSink; +import reactor.netty.http.server.HttpServerRequest; +import reactor.netty.http.server.HttpServerResponse; + +class NonStreamingRequestConsumer<T extends HttpContent> implements Consumer<T>, Publisher<HttpContent>, Disposable { + private final HttpServerRequest request; + private final HttpServerResponse response; + private final CompositeByteBuf content; + private final Publisher<HttpContent> publisher; + private final AbstractHttpServerTransport transport; + private final AtomicBoolean disposed = new AtomicBoolean(false); + private volatile FluxSink<HttpContent> emitter; + + NonStreamingRequestConsumer( + AbstractHttpServerTransport transport, + HttpServerRequest request, + HttpServerResponse response, + int maxCompositeBufferComponents + ) { + this.transport = transport; + this.request = request; + this.response = response; + this.content = response.alloc().compositeBuffer(maxCompositeBufferComponents); + this.publisher = Flux.create(emitter -> register(emitter)); + } + + private void register(FluxSink<HttpContent> emitter) { + this.emitter = emitter.onDispose(this).onCancel(this); + } + + @Override + public void accept(T message) { + try { + if (message instanceof LastHttpContent) { + process(message, emitter); + } else if (message instanceof HttpContent) { + process(message, emitter); + } + } catch (Throwable ex) { + emitter.error(ex); + } + } + + public void process(HttpContent in, FluxSink<HttpContent> emitter) { + // Consume request body in full before dispatching it + content.addComponent(true, in.content().retain()); + + if (in instanceof LastHttpContent) { + final NonStreamingHttpChannel channel = new NonStreamingHttpChannel(request, response, emitter); + final HttpRequest r = createRequest(request, content); + + try { + transport.incomingRequest(r, channel); + } catch (Exception ex) { + emitter.error(ex); + transport.onException(channel, ex); + } finally { + r.release(); + if (disposed.compareAndSet(false, true)) { + this.content.release(); + } + } + } + }
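+ + // The composite buffer must be released exactly once: the request path above and dispose() below + // both funnel through the disposed.compareAndSet(false, true) guard before calling release().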
+ + HttpRequest createRequest(HttpServerRequest request, CompositeByteBuf content) { + return new ReactorNetty4HttpRequest(request, content.retain()); + } + + @Override + public void subscribe(Subscriber<? super HttpContent> s) { + publisher.subscribe(s); + } + + @Override + public void dispose() { + if (disposed.compareAndSet(false, true)) { + this.content.release(); + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java new file mode 100644 index 0000000000000..4406c555a5b04 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java @@ -0,0 +1,272 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.http.HttpRequest; +import org.opensearch.rest.RestRequest; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import java.util.AbstractMap; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultHttpHeaders; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.cookie.Cookie; +import io.netty.handler.codec.http.cookie.ServerCookieDecoder; +import io.netty.handler.codec.http.cookie.ServerCookieEncoder; +import reactor.netty.http.server.HttpServerRequest; + +class ReactorNetty4HttpRequest implements HttpRequest { + private final String protocol; + private final HttpMethod method; + private final String uri; + private final ByteBuf content; + private final HttpHeadersMap headers; + private final AtomicBoolean released; + private final Exception inboundException; + private final boolean pooled; + + ReactorNetty4HttpRequest(HttpServerRequest request, ByteBuf content) { + this(request, new HttpHeadersMap(request.requestHeaders()), new AtomicBoolean(false), true, content); + } + + ReactorNetty4HttpRequest(HttpServerRequest request, ByteBuf content, Exception inboundException) { + this( + request.protocol(), + request.method(), + request.uri(), + new HttpHeadersMap(request.requestHeaders()), + new AtomicBoolean(false), + true, + content, + inboundException + ); + } + + private ReactorNetty4HttpRequest( + HttpServerRequest request, + HttpHeadersMap headers, + AtomicBoolean released, + boolean pooled, + ByteBuf content + ) { + this(request.protocol(), request.method(), request.uri(), headers, released, pooled, content, null); + } + + private ReactorNetty4HttpRequest( + String protocol, + HttpMethod method, + String uri, + HttpHeadersMap headers, + AtomicBoolean released, + boolean pooled, + ByteBuf content, + Exception inboundException + ) { + + this.protocol = protocol; + this.method = method; + this.uri = uri; + this.headers = headers; + this.content = content; + this.pooled = pooled; + this.released = released; + this.inboundException = inboundException; + } + + @Override + public RestRequest.Method method() { + return HttpConversionUtil.convertMethod(method); + } + + @Override + public String uri() { + return uri; + } + + @Override + public BytesReference content() { + assert released.get() == false; + return Netty4Utils.toBytesReference(content); + } + + @Override + public void release() { + if (pooled && released.compareAndSet(false, true)) { + content.release(); + } + } + + @Override + public HttpRequest releaseAndCopy() { + assert released.get() == false; + if (pooled == false) { + return this; + } + try { + final ByteBuf copiedContent = Unpooled.copiedBuffer(content); + return new ReactorNetty4HttpRequest(protocol, method, uri, headers, new AtomicBoolean(false), false, copiedContent, null); + } finally { + release(); + } + } + + @Override + public final Map<String, List<String>> getHeaders() { + return headers; + } + + @Override + public List<String> strictCookies() { + String cookieString =
headers.httpHeaders.get(HttpHeaderNames.COOKIE); + if (cookieString != null) { + Set<Cookie> cookies = ServerCookieDecoder.STRICT.decode(cookieString); + if (!cookies.isEmpty()) { + return ServerCookieEncoder.STRICT.encode(cookies); + } + } + return Collections.emptyList(); + } + + @Override + public HttpVersion protocolVersion() { + if (protocol.equals(io.netty.handler.codec.http.HttpVersion.HTTP_1_0.toString())) { + return HttpRequest.HttpVersion.HTTP_1_0; + } else if (protocol.equals(io.netty.handler.codec.http.HttpVersion.HTTP_1_1.toString())) { + return HttpRequest.HttpVersion.HTTP_1_1; + } else { + throw new IllegalArgumentException("Unexpected http protocol version: " + protocol); + } + } + + @Override + public HttpRequest removeHeader(String header) { + HttpHeaders headersWithoutContentTypeHeader = new DefaultHttpHeaders(); + headersWithoutContentTypeHeader.add(headers.httpHeaders); + headersWithoutContentTypeHeader.remove(header); + + return new ReactorNetty4HttpRequest( + protocol, + method, + uri, + new HttpHeadersMap(headersWithoutContentTypeHeader), + released, + pooled, + content, + null + ); + } + + @Override + public ReactorNetty4HttpResponse createResponse(RestStatus status, BytesReference content) { + return new ReactorNetty4HttpResponse( + headers.httpHeaders, + io.netty.handler.codec.http.HttpVersion.valueOf(protocol), + status, + content + ); + } + + @Override + public Exception getInboundException() { + return inboundException; + } + + /** + * A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications + * and due to the underlying implementation, it performs case insensitive lookups of key to values. + * + * It is important to note that this implementation does have some downsides in that each invocation of the + * {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a + * view of the underlying values. + */ + private static class HttpHeadersMap implements Map<String, List<String>> { + + private final HttpHeaders httpHeaders; + + private HttpHeadersMap(HttpHeaders httpHeaders) { + this.httpHeaders = httpHeaders; + } + + @Override + public int size() { + return httpHeaders.size(); + } + + @Override + public boolean isEmpty() { + return httpHeaders.isEmpty(); + } + + @Override + public boolean containsKey(Object key) { + return key instanceof String && httpHeaders.contains((String) key); + } + + @Override + public boolean containsValue(Object value) { + return value instanceof List && httpHeaders.names().stream().map(httpHeaders::getAll).anyMatch(value::equals); + } + + @Override + public List<String> get(Object key) { + return key instanceof String ?
httpHeaders.getAll((String) key) : null; + } + + @Override + public List<String> put(String key, List<String> value) { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public List<String> remove(Object key) { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public void putAll(Map<? extends String, ? extends List<String>> m) { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public void clear() { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public Set<String> keySet() { + return httpHeaders.names(); + } + + @Override + public Collection<List<String>> values() { + return httpHeaders.names().stream().map(k -> Collections.unmodifiableList(httpHeaders.getAll(k))).collect(Collectors.toList()); + } + + @Override + public Set<Map.Entry<String, List<String>>> entrySet() { + return httpHeaders.names() + .stream() + .map(k -> new AbstractMap.SimpleImmutableEntry<>(k, httpHeaders.getAll(k))) + .collect(Collectors.toSet()); + } + } +}
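An aside on HttpHeadersMap above: the case-insensitive Map.get() semantics it exposes through getHeaders() come directly from Netty's header implementation, as this self-contained sketch shows (illustrative only; the HeaderLookupDemo class and its names are ours, not the plugin's):

    import java.util.List;

    import io.netty.handler.codec.http.DefaultHttpHeaders;
    import io.netty.handler.codec.http.HttpHeaders;

    public class HeaderLookupDemo {
        public static void main(String[] args) {
            HttpHeaders headers = new DefaultHttpHeaders();
            headers.add("Content-Type", "application/json");
            // Netty header lookups are case-insensitive, which the map wrapper inherits:
            List<String> values = headers.getAll("CONTENT-TYPE");
            System.out.println(values);   // [application/json]
        }
    }

Also note, per the javadoc above, that values() and entrySet() copy the headers on every invocation, so callers should cache the returned collection when iterating repeatedly.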
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpResponse.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpResponse.java new file mode 100644 index 0000000000000..c45ad54b668a3 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpResponse.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.http.HttpResponse; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; + +class ReactorNetty4HttpResponse extends DefaultFullHttpResponse implements HttpResponse { + private final HttpHeaders requestHeaders; + + ReactorNetty4HttpResponse(HttpHeaders requestHeaders, HttpVersion version, RestStatus status, BytesReference content) { + super(version, HttpResponseStatus.valueOf(status.getStatus()), Netty4Utils.toByteBuf(content)); + this.requestHeaders = requestHeaders; + } + + @Override + public void addHeader(String name, String value) { + headers().add(name, value); + } + + @Override + public boolean containsHeader(String name) { + return headers().contains(name); + } + + public HttpHeaders requestHeaders() { + return requestHeaders; + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerChannel.java new file mode 100644 index 0000000000000..84360bf028ba9 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerChannel.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.common.concurrent.CompletableContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.http.HttpServerChannel; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import java.net.InetSocketAddress; + +import io.netty.channel.Channel; + +class ReactorNetty4HttpServerChannel implements HttpServerChannel { + private final Channel channel; + private final CompletableContext<Void> closeContext = new CompletableContext<>(); + + ReactorNetty4HttpServerChannel(Channel channel) { + this.channel = channel; + Netty4Utils.addListener(this.channel.closeFuture(), closeContext); + } + + @Override + public InetSocketAddress getLocalAddress() { + return (InetSocketAddress) channel.localAddress(); + } + + @Override + public void addCloseListener(ActionListener<Void> listener) { + closeContext.addListener(ActionListener.toBiConsumer(listener)); + } + + @Override + public boolean isOpen() { + return channel.isOpen(); + } + + @Override + public void close() { + channel.close(); + } + + @Override + public String toString() { + return "ReactorNetty4HttpChannel{localAddress=" + getLocalAddress() + "}"; + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java new file mode 100644 index 0000000000000..d4a5a9ad83af6 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java @@ -0,0 +1,313 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.common.util.net.NetUtils; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.http.AbstractHttpServerTransport; +import org.opensearch.http.HttpChannel; +import org.opensearch.http.HttpReadTimeoutException; +import org.opensearch.http.HttpServerChannel; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.reactor.SharedGroupFactory; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import java.net.InetSocketAddress; +import java.net.SocketOption; +import java.time.Duration; + +import io.netty.channel.ChannelOption; +import io.netty.channel.socket.nio.NioChannelOption; +import io.netty.handler.codec.http.DefaultLastHttpContent; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.timeout.ReadTimeoutException; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Scheduler; +import reactor.core.scheduler.Schedulers; +import reactor.netty.DisposableServer; +import reactor.netty.http.HttpProtocol; +import reactor.netty.http.server.HttpServer; +import reactor.netty.http.server.HttpServerRequest; +import reactor.netty.http.server.HttpServerResponse; + +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_CONNECT_TIMEOUT; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_ALIVE; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_COUNT; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_IDLE; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_INTERVAL; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_NO_DELAY; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE; + +/** + * The HTTP transport implementations based on Reactor Netty (see please {@link HttpServer}). 
+ */ +public class ReactorNetty4HttpServerTransport extends AbstractHttpServerTransport { + private static final String SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = "http.netty.max_composite_buffer_components"; + private static final ByteSizeValue MTU = new ByteSizeValue(Long.parseLong(System.getProperty("opensearch.net.mtu", "1500"))); + + /** + * The number of Reactor Netty HTTP workers + */ + public static final Setting<Integer> SETTING_HTTP_WORKER_COUNT = Setting.intSetting("http.netty.worker_count", 0, Property.NodeScope); + + /** + * The maximum number of composite components for request accumulation + */ + public static Setting<Integer> SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = new Setting<>( + SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, + (s) -> { + ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(s); + /* + * Netty accumulates buffers containing data from all incoming network packets that make up one HTTP request in an instance of + * io.netty.buffer.CompositeByteBuf (think of it as a buffer of buffers). Once its capacity is reached, the buffer will iterate + * over its individual entries and put them into larger buffers (see io.netty.buffer.CompositeByteBuf#consolidateIfNeeded() + * for implementation details). We want to avoid resizing that buffer because resizing leads to additional garbage on the heap + * and also increases the application's native memory footprint (as direct byte buffers hold their contents off-heap). + * + * With this setting we control the CompositeByteBuf's capacity (which is by default 1024, see + * io.netty.handler.codec.MessageAggregator#DEFAULT_MAX_COMPOSITEBUFFER_COMPONENTS). To determine a proper default capacity for + * that buffer, we need to consider that the upper bound for the size of HTTP requests is determined by `maxContentLength`. The + * number of buffers that are needed depends on how often Netty reads network packets, which depends on the network type (MTU). + * We assume here that OpenSearch receives HTTP requests via an Ethernet connection which has an MTU of 1500 bytes. + * + * Note that we are *not* pre-allocating any memory based on this setting but rather determine the CompositeByteBuf's capacity. + * The tradeoff is between fewer (but larger) buffers that are contained in the CompositeByteBuf and more (but smaller) buffers. + * With the default max content length of 100MB and an MTU of 1500 bytes we would allow 69905 entries. + */ + long maxBufferComponentsEstimate = Math.round((double) (maxContentLength.getBytes() / MTU.getBytes())); + // clamp the value to the allowed range; Netty's CompositeByteBuf implementation does not + // allow less than two components + long maxBufferComponents = Math.max(2, Math.min(maxBufferComponentsEstimate, Integer.MAX_VALUE)); + return String.valueOf(maxBufferComponents);
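+ // For reference: with the default max content length of 100MB and an MTU of 1500 bytes, the + // estimate above works out to round(104857600 / 1500) = 69905 components, the figure quoted in the javadoc.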
+ }, + s -> Setting.parseInt(s, 2, Integer.MAX_VALUE, SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS), + Property.NodeScope + ); + + private final SharedGroupFactory sharedGroupFactory; + private final int readTimeoutMillis; + private final int connectTimeoutMillis; + private final int maxCompositeBufferComponents; + private final ByteSizeValue maxInitialLineLength; + private final ByteSizeValue maxHeaderSize; + private final ByteSizeValue maxChunkSize; + private volatile SharedGroupFactory.SharedGroup sharedGroup; + private volatile DisposableServer disposableServer; + private volatile Scheduler scheduler; + + /** + * Creates new HTTP transport implementations based on Reactor Netty (see please {@link HttpServer}). + * @param settings settings + * @param networkService network service + * @param bigArrays big array allocator + * @param threadPool thread pool instance + * @param xContentRegistry XContent registry instance + * @param dispatcher dispatcher instance + * @param clusterSettings cluster settings + * @param sharedGroupFactory shared group factory + * @param tracer tracer instance + */ + public ReactorNetty4HttpServerTransport( + Settings settings, + NetworkService networkService, + BigArrays bigArrays, + ThreadPool threadPool, + NamedXContentRegistry xContentRegistry, + Dispatcher dispatcher, + ClusterSettings clusterSettings, + SharedGroupFactory sharedGroupFactory, + Tracer tracer + ) { + super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings, tracer); + Netty4Utils.setAvailableProcessors(OpenSearchExecutors.NODE_PROCESSORS_SETTING.get(settings)); + this.readTimeoutMillis = Math.toIntExact(SETTING_HTTP_READ_TIMEOUT.get(settings).getMillis()); + this.connectTimeoutMillis = Math.toIntExact(SETTING_HTTP_CONNECT_TIMEOUT.get(settings).getMillis()); + this.sharedGroupFactory = sharedGroupFactory; + this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings); + this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); + this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); + this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); + } + + /** + * Binds the transport engine to the socket address + * @param socketAddress socket address to bind to + */ + @Override + protected HttpServerChannel bind(InetSocketAddress socketAddress) throws Exception { + final HttpServer server = configureChannelOptions( + HttpServer.create() + .httpFormDecoder(builder -> builder.scheduler(scheduler)) + .idleTimeout(Duration.ofMillis(connectTimeoutMillis)) + .readTimeout(Duration.ofMillis(readTimeoutMillis)) + .runOn(sharedGroup.getLowLevelGroup()) + .bindAddress(() -> socketAddress) + .compress(true) + .httpRequestDecoder( + spec -> spec.maxChunkSize(maxChunkSize.bytesAsInt()) + .maxHeaderSize(maxHeaderSize.bytesAsInt()) + .maxInitialLineLength(maxInitialLineLength.bytesAsInt()) + ) + .protocol(HttpProtocol.HTTP11, HttpProtocol.H2C) + .handle((req, res) -> incomingRequest(req, res)) + ); + + disposableServer = server.bindNow(); + return new ReactorNetty4HttpServerChannel(disposableServer.channel()); + } + + private HttpServer configureChannelOptions(final HttpServer server1) { + HttpServer configured = server1.childOption(ChannelOption.TCP_NODELAY, SETTING_HTTP_TCP_NO_DELAY.get(settings)) + .childOption(ChannelOption.SO_KEEPALIVE, SETTING_HTTP_TCP_KEEP_ALIVE.get(settings)); + + if (SETTING_HTTP_TCP_KEEP_ALIVE.get(settings)) { + // Netty logs a warning if it can't set the 
option, so try this only on supported platforms + if (IOUtils.LINUX || IOUtils.MAC_OS_X) { + if (SETTING_HTTP_TCP_KEEP_IDLE.get(settings) >= 0) { + final SocketOption keepIdleOption = NetUtils.getTcpKeepIdleSocketOptionOrNull(); + if (keepIdleOption != null) { + configured = configured.childOption(NioChannelOption.of(keepIdleOption), SETTING_HTTP_TCP_KEEP_IDLE.get(settings)); + } + } + if (SETTING_HTTP_TCP_KEEP_INTERVAL.get(settings) >= 0) { + final SocketOption keepIntervalOption = NetUtils.getTcpKeepIntervalSocketOptionOrNull(); + if (keepIntervalOption != null) { + configured = configured.childOption( + NioChannelOption.of(keepIntervalOption), + SETTING_HTTP_TCP_KEEP_INTERVAL.get(settings) + ); + } + } + if (SETTING_HTTP_TCP_KEEP_COUNT.get(settings) >= 0) { + final SocketOption keepCountOption = NetUtils.getTcpKeepCountSocketOptionOrNull(); + if (keepCountOption != null) { + configured = configured.childOption( + NioChannelOption.of(keepCountOption), + SETTING_HTTP_TCP_KEEP_COUNT.get(settings) + ); + } + } + } + } + + final ByteSizeValue tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings); + if (tcpSendBufferSize.getBytes() > 0) { + configured = configured.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes())); + } + + final ByteSizeValue tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings); + if (tcpReceiveBufferSize.getBytes() > 0) { + configured = configured.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes())); + } + + final boolean reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings); + configured = configured.option(ChannelOption.SO_REUSEADDR, reuseAddress); + configured = configured.childOption(ChannelOption.SO_REUSEADDR, reuseAddress); + + return configured; + } + + /** + * Handles incoming Reactor Netty request + * @param request request instance + * @param response response instances + * @return response publisher + */ + protected Publisher incomingRequest(HttpServerRequest request, HttpServerResponse response) { + final NonStreamingRequestConsumer consumer = new NonStreamingRequestConsumer<>( + this, + request, + response, + maxCompositeBufferComponents + ); + + request.receiveContent().switchIfEmpty(Mono.just(DefaultLastHttpContent.EMPTY_LAST_CONTENT)).subscribe(consumer); + + return Mono.from(consumer).flatMap(hc -> { + final FullHttpResponse r = (FullHttpResponse) hc; + response.status(r.status()); + response.trailerHeaders(c -> r.trailingHeaders().forEach(h -> c.add(h.getKey(), h.getValue()))); + response.chunkedTransfer(false); + response.compression(true); + r.headers().forEach(h -> response.addHeader(h.getKey(), h.getValue())); + return Mono.from(response.sendObject(r.content())); + }); + } + + /** + * Called to tear down internal resources + */ + @Override + protected void stopInternal() { + if (sharedGroup != null) { + sharedGroup.shutdown(); + sharedGroup = null; + } + + if (scheduler != null) { + scheduler.dispose(); + scheduler = null; + } + + if (disposableServer != null) { + disposableServer.disposeNow(); + disposableServer = null; + } + } + + /** + * Starts the transport + */ + @Override + protected void doStart() { + boolean success = false; + try { + scheduler = Schedulers.newBoundedElastic( + Schedulers.DEFAULT_BOUNDED_ELASTIC_SIZE, + Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, + "http-form-decoder" + ); + sharedGroup = sharedGroupFactory.getHttpGroup(); + bindServer(); + success = true; + } finally { + if (success == false) { + doStop(); // otherwise 
we leak threads since we never moved to started
+ }
+ }
+ }
+
+ @Override
+ public void onException(HttpChannel channel, Exception cause) {
+ if (cause instanceof ReadTimeoutException) {
+ super.onException(channel, new HttpReadTimeoutException(readTimeoutMillis, cause));
+ } else {
+ super.onException(channel, cause);
+ }
+ }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/package-info.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/package-info.java
new file mode 100644
index 0000000000000..b5ecb0b62f79d
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * The new HTTP transport implementations based on Reactor Netty.
+ */
+package org.opensearch.http.reactor.netty4;
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java
new file mode 100644
index 0000000000000..dc310c3793109
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java
@@ -0,0 +1,109 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+package org.opensearch.transport.reactor;
+
+import org.opensearch.common.SetOnce;
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.BigArrays;
+import org.opensearch.common.util.PageCacheRecycler;
+import org.opensearch.core.indices.breaker.CircuitBreakerService;
+import org.opensearch.core.xcontent.NamedXContentRegistry;
+import org.opensearch.http.HttpServerTransport;
+import org.opensearch.http.reactor.netty4.ReactorNetty4HttpServerTransport;
+import org.opensearch.plugins.NetworkPlugin;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.telemetry.tracing.Tracer;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+
+/**
+ * The experimental network plugin that introduces new transport implementations based on Reactor Netty.
+ */
+public class ReactorNetty4Plugin extends Plugin implements NetworkPlugin {
+ /**
+ * The name of new experimental HTTP transport implementations based on Reactor Netty.
+ */
+ public static final String REACTOR_NETTY_HTTP_TRANSPORT_NAME = "reactor-netty4";
+
+ private final SetOnce<SharedGroupFactory> groupFactory = new SetOnce<>();
+
+ /**
+ * Default constructor
+ */
+ public ReactorNetty4Plugin() {}
+
+ /**
+ * Returns a list of additional {@link Setting} definitions for this plugin.
+ */
+ @Override
+ public List<Setting<?>> getSettings() {
+ return Arrays.asList(/* no setting registered since we're picking the ones from Netty 4 transport */);
+ }
+
+ /**
+ * Returns a map of {@link HttpServerTransport} suppliers.
+ * See {@link org.opensearch.common.network.NetworkModule#HTTP_TYPE_SETTING} to configure a specific implementation.
+ * @param settings settings
+ * @param networkService network service
+ * @param bigArrays big array allocator
+ * @param pageCacheRecycler page cache recycler instance
+ * @param circuitBreakerService circuit breaker service instance
+ * @param threadPool thread pool instance
+ * @param xContentRegistry XContent registry instance
+ * @param dispatcher dispatcher instance
+ * @param clusterSettings cluster settings
+ * @param tracer tracer instance
+ */
+ @Override
+ public Map<String, Supplier<HttpServerTransport>> getHttpTransports(
+ Settings settings,
+ ThreadPool threadPool,
+ BigArrays bigArrays,
+ PageCacheRecycler pageCacheRecycler,
+ CircuitBreakerService circuitBreakerService,
+ NamedXContentRegistry xContentRegistry,
+ NetworkService networkService,
+ HttpServerTransport.Dispatcher dispatcher,
+ ClusterSettings clusterSettings,
+ Tracer tracer
+ ) {
+ return Collections.singletonMap(
+ REACTOR_NETTY_HTTP_TRANSPORT_NAME,
+ () -> new ReactorNetty4HttpServerTransport(
+ settings,
+ networkService,
+ bigArrays,
+ threadPool,
+ xContentRegistry,
+ dispatcher,
+ clusterSettings,
+ getSharedGroupFactory(settings),
+ tracer
+ )
+ );
+ }
+
+ private SharedGroupFactory getSharedGroupFactory(Settings settings) {
+ final SharedGroupFactory groupFactory = this.groupFactory.get();
+ if (groupFactory != null) {
+ assert groupFactory.getSettings().equals(settings) : "Different settings than originally provided";
+ return groupFactory;
+ } else {
+ this.groupFactory.set(new SharedGroupFactory(settings));
+ return this.groupFactory.get();
+ }
+ }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java
new file mode 100644
index 0000000000000..ab7de33c8e673
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java
@@ -0,0 +1,164 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.transport.reactor;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.concurrent.AbstractRefCounted;
+import org.opensearch.http.HttpServerTransport;
+import org.opensearch.http.reactor.netty4.ReactorNetty4HttpServerTransport;
+import org.opensearch.transport.TcpTransport;
+import org.opensearch.transport.reactor.netty4.ReactorNetty4Transport;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.util.concurrent.Future;
+
+import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory;
+
+/**
+ * Creates and returns {@link io.netty.channel.EventLoopGroup} instances. It will return a shared group for
+ * both {@link #getHttpGroup()} and {@link #getTransportGroup()} if
+ * {@link org.opensearch.http.reactor.netty4.ReactorNetty4HttpServerTransport#SETTING_HTTP_WORKER_COUNT} is configured to be 0.
+ * If that setting is not 0, then it will return a different group in the {@link #getHttpGroup()} call. + */ +public final class SharedGroupFactory { + + private static final Logger logger = LogManager.getLogger(SharedGroupFactory.class); + + private final Settings settings; + private final int workerCount; + private final int httpWorkerCount; + + private RefCountedGroup genericGroup; + private SharedGroup dedicatedHttpGroup; + + /** + * Creates new shared group factory instance from settings + * @param settings settings + */ + public SharedGroupFactory(Settings settings) { + this.settings = settings; + this.workerCount = ReactorNetty4Transport.SETTING_WORKER_COUNT.get(settings); + this.httpWorkerCount = ReactorNetty4HttpServerTransport.SETTING_HTTP_WORKER_COUNT.get(settings); + } + + Settings getSettings() { + return settings; + } + + /** + * Gets the number of configured transport workers + * @return the number of configured transport workers + */ + public int getTransportWorkerCount() { + return workerCount; + } + + /** + * Gets transport shared group + * @return transport shared group + */ + public synchronized SharedGroup getTransportGroup() { + return getGenericGroup(); + } + + /** + * Gets HTTP transport shared group + * @return HTTP transport shared group + */ + public synchronized SharedGroup getHttpGroup() { + if (httpWorkerCount == 0) { + return getGenericGroup(); + } else { + if (dedicatedHttpGroup == null) { + NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup( + httpWorkerCount, + daemonThreadFactory(settings, HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX) + ); + dedicatedHttpGroup = new SharedGroup(new RefCountedGroup(eventLoopGroup)); + } + return dedicatedHttpGroup; + } + } + + private SharedGroup getGenericGroup() { + if (genericGroup == null) { + EventLoopGroup eventLoopGroup = new NioEventLoopGroup( + workerCount, + daemonThreadFactory(settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX) + ); + this.genericGroup = new RefCountedGroup(eventLoopGroup); + } else { + genericGroup.incRef(); + } + return new SharedGroup(genericGroup); + } + + private static class RefCountedGroup extends AbstractRefCounted { + + public static final String NAME = "ref-counted-event-loop-group"; + private final EventLoopGroup eventLoopGroup; + + private RefCountedGroup(EventLoopGroup eventLoopGroup) { + super(NAME); + this.eventLoopGroup = eventLoopGroup; + } + + @Override + protected void closeInternal() { + Future shutdownFuture = eventLoopGroup.shutdownGracefully(0, 5, TimeUnit.SECONDS); + shutdownFuture.awaitUninterruptibly(); + if (shutdownFuture.isSuccess() == false) { + logger.warn("Error closing netty event loop group", shutdownFuture.cause()); + } + } + } + + /** + * Wraps the {@link RefCountedGroup}. Calls {@link RefCountedGroup#decRef()} on close. After close, + * this wrapped instance can no longer be used. 
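+ * Repeated calls to {@link #shutdown()} are safe: an {@link java.util.concurrent.atomic.AtomicBoolean} guard ensures
+ * the reference to the underlying group is released exactly once.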
+ */ + public static class SharedGroup { + + private final RefCountedGroup refCountedGroup; + + private final AtomicBoolean isOpen = new AtomicBoolean(true); + + private SharedGroup(RefCountedGroup refCountedGroup) { + this.refCountedGroup = refCountedGroup; + } + + /** + * Gets Netty's {@link EventLoopGroup} instance + * @return Netty's {@link EventLoopGroup} instance + */ + public EventLoopGroup getLowLevelGroup() { + return refCountedGroup.eventLoopGroup; + } + + /** + * Decreases the reference to underlying {@link EventLoopGroup} instance + */ + public void shutdown() { + if (isOpen.compareAndSet(true, false)) { + refCountedGroup.decRef(); + } + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/Netty4Utils.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/Netty4Utils.java new file mode 100644 index 0000000000000..8ec432b7dd5cd --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/Netty4Utils.java @@ -0,0 +1,142 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.transport.reactor.netty4; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; +import org.opensearch.ExceptionsHelper; +import org.opensearch.common.Booleans; +import org.opensearch.common.concurrent.CompletableContext; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicBoolean; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFuture; +import io.netty.util.NettyRuntime; + +/** + * Shameless copy of Netty4Utils from transport-netty4 module + */ +public final class Netty4Utils { + private static final AtomicBoolean isAvailableProcessorsSet = new AtomicBoolean(); + + /** + * Utility class + */ + private Netty4Utils() {} + + /** + * Set the number of available processors that Netty uses for sizing various resources (e.g., thread pools). + * + * @param availableProcessors the number of available processors + * @throws IllegalStateException if available processors was set previously and the specified value does not match the already-set value + */ + public static void setAvailableProcessors(final int availableProcessors) { + // we set this to false in tests to avoid tests that randomly set processors from stepping on each other + final boolean set = Booleans.parseBoolean(System.getProperty("opensearch.set.netty.runtime.available.processors", "true")); + if (!set) { + return; + } + + /* + * This can be invoked twice, once from Netty4Transport and another time from Netty4HttpServerTransport; however, + * Netty4Runtime#availableProcessors forbids settings the number of processors twice so we prevent double invocation here. 
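+ * Only the first call actually reaches NettyRuntime#setAvailableProcessors; subsequent calls merely verify
+ * that the requested value matches the one already set, and throw otherwise.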
+ */
+ if (isAvailableProcessorsSet.compareAndSet(false, true)) {
+ NettyRuntime.setAvailableProcessors(availableProcessors);
+ } else if (availableProcessors != NettyRuntime.availableProcessors()) {
+ /*
+ * We have previously set the available processors yet either we are trying to set it to a different value now or there is a bug
+ * in Netty and our previous value did not take, bail.
+ */
+ final String message = String.format(
+ Locale.ROOT,
+ "available processors value [%d] did not match current value [%d]",
+ availableProcessors,
+ NettyRuntime.availableProcessors()
+ );
+ throw new IllegalStateException(message);
+ }
+ }
+
+ /**
+ * Turns the given BytesReference into a ByteBuf. Note: the returned ByteBuf will reference the internal
+ * pages of the BytesReference. Don't free the bytes of the reference before the ByteBuf goes out of scope.
+ * @param reference reference to convert
+ */
+ public static ByteBuf toByteBuf(final BytesReference reference) {
+ if (reference.length() == 0) {
+ return Unpooled.EMPTY_BUFFER;
+ }
+ final BytesRefIterator iterator = reference.iterator();
+ // usually we have one, two, or three components from the header, the message, and a buffer
+ final List<ByteBuf> buffers = new ArrayList<>(3);
+ try {
+ BytesRef slice;
+ while ((slice = iterator.next()) != null) {
+ buffers.add(Unpooled.wrappedBuffer(slice.bytes, slice.offset, slice.length));
+ }
+
+ if (buffers.size() == 1) {
+ return buffers.get(0);
+ } else {
+ CompositeByteBuf composite = Unpooled.compositeBuffer(buffers.size());
+ composite.addComponents(true, buffers);
+ return composite;
+ }
+ } catch (IOException ex) {
+ throw new AssertionError("no IO happens here", ex);
+ }
+ }
+
+ /**
+ * Wraps the given ByteBuf with a BytesReference
+ * @param buffer buffer to convert
+ */
+ public static BytesReference toBytesReference(final ByteBuf buffer) {
+ final int readableBytes = buffer.readableBytes();
+ if (readableBytes == 0) {
+ return BytesArray.EMPTY;
+ } else if (buffer.hasArray()) {
+ return new BytesArray(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), readableBytes);
+ } else {
+ final ByteBuffer[] byteBuffers = buffer.nioBuffers();
+ return BytesReference.fromByteBuffers(byteBuffers);
+ }
+ }
+
+ /**
+ * Add completion listener to ChannelFuture
+ * @param channelFuture ChannelFuture to add listener to
+ * @param context completion listener context
+ */
+ public static void addListener(ChannelFuture channelFuture, CompletableContext<Void> context) {
+ channelFuture.addListener(f -> {
+ if (f.isSuccess()) {
+ context.complete(null);
+ } else {
+ Throwable cause = f.cause();
+ if (cause instanceof Error) {
+ ExceptionsHelper.maybeDieOnAnotherThread(cause);
+ context.completeExceptionally(new Exception(cause));
+ } else {
+ context.completeExceptionally((Exception) cause);
+ }
+ }
+ });
+ }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/ReactorNetty4Transport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/ReactorNetty4Transport.java
new file mode 100644
index 0000000000000..b3e92f58c540a
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/ReactorNetty4Transport.java
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.transport.reactor.netty4; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; + +import reactor.netty.tcp.TcpServer; + +/** + * The transport implementations based on Reactor Netty (see please {@link TcpServer}). + */ +public class ReactorNetty4Transport { + /** + * The number of Netty workers + */ + public static final Setting SETTING_WORKER_COUNT = new Setting<>( + "transport.netty.worker_count", + (s) -> Integer.toString(OpenSearchExecutors.allocatedProcessors(s)), + (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), + Property.NodeScope + ); + + /** + * Default constructor + */ + public ReactorNetty4Transport() {} +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/package-info.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/package-info.java new file mode 100644 index 0000000000000..921bca104c6fe --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * The new transport implementations based on Reactor Netty. + */ +package org.opensearch.transport.reactor.netty4; diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/package-info.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/package-info.java new file mode 100644 index 0000000000000..2f36ebb7f11f8 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * The experimental network plugin that introduces new transport implementations based on Reactor Netty. + */ +package org.opensearch.transport.reactor; diff --git a/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy b/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..4f2dcde995338 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +grant codeBase "${codebase.netty-common}" { + // for reading the system-wide configuration for the backlog of established sockets + permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; + + // netty makes and accepts socket connections + permission java.net.SocketPermission "*", "accept,connect"; + + // Netty sets custom classloader for some of its internal threads + permission java.lang.RuntimePermission "*", "setContextClassLoader"; +}; + +grant codeBase "${codebase.netty-transport}" { + // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 + // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! + permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; +}; diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java new file mode 100644 index 0000000000000..443ecd0f40ead --- /dev/null +++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java @@ -0,0 +1,208 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.common.collect.Tuple; +import org.opensearch.tasks.Task; + +import java.io.Closeable; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.EmptyHttpHeaders; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http2.HttpConversionUtil; +import io.netty.resolver.DefaultAddressResolverGroup; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.publisher.ParallelFlux; +import reactor.netty.http.client.HttpClient; + +import static io.netty.handler.codec.http.HttpHeaderNames.HOST; +import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; + +/** + * Tiny helper to send http requests over netty. 
+ */
+class ReactorHttpClient implements Closeable {
+ private final boolean compression;
+
+ static Collection<String> returnHttpResponseBodies(Collection<FullHttpResponse> responses) {
+ List<String> list = new ArrayList<>(responses.size());
+ for (FullHttpResponse response : responses) {
+ list.add(response.content().toString(StandardCharsets.UTF_8));
+ }
+ return list;
+ }
+
+ static Collection<String> returnOpaqueIds(Collection<FullHttpResponse> responses) {
+ List<String> list = new ArrayList<>(responses.size());
+ for (HttpResponse response : responses) {
+ list.add(response.headers().get(Task.X_OPAQUE_ID));
+ }
+ return list;
+ }
+
+ ReactorHttpClient(boolean compression) {
+ this.compression = compression;
+ }
+
+ static ReactorHttpClient create() {
+ return create(true);
+ }
+
+ static ReactorHttpClient create(boolean compression) {
+ return new ReactorHttpClient(compression);
+ }
+
+ public List<FullHttpResponse> get(InetSocketAddress remoteAddress, String... uris) throws InterruptedException {
+ return get(remoteAddress, false, uris);
+ }
+
+ public List<FullHttpResponse> get(InetSocketAddress remoteAddress, boolean ordered, String... uris) throws InterruptedException {
+ final List<FullHttpRequest> requests = new ArrayList<>(uris.length);
+
+ for (int i = 0; i < uris.length; i++) {
+ final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uris[i]);
+ httpRequest.headers().add(HOST, "localhost");
+ httpRequest.headers().add("X-Opaque-ID", String.valueOf(i));
+ httpRequest.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http");
+ requests.add(httpRequest);
+ }
+
+ return sendRequests(remoteAddress, requests, ordered);
+ }
+
+ public final Collection<FullHttpResponse> post(InetSocketAddress remoteAddress, List<Tuple<String, CharSequence>> urisAndBodies)
+ throws InterruptedException {
+ return processRequestsWithBody(HttpMethod.POST, remoteAddress, urisAndBodies);
+ }
+
+ public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequest httpRequest) throws InterruptedException {
+ final List<FullHttpResponse> responses = sendRequests(remoteAddress, Collections.singleton(httpRequest), false);
+ assert responses.size() == 1 : "expected 1 and only 1 http response";
+ return responses.get(0);
+ }
+
+ public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequest httpRequest, HttpContent content)
+ throws InterruptedException {
+ final List<FullHttpResponse> responses = sendRequests(
+ remoteAddress,
+ Collections.singleton(
+ new DefaultFullHttpRequest(
+ httpRequest.protocolVersion(),
+ httpRequest.method(),
+ httpRequest.uri(),
+ content.content(),
+ httpRequest.headers(),
+ httpRequest.trailingHeaders()
+ )
+ ),
+ false
+ );
+ assert responses.size() == 1 : "expected 1 and only 1 http response";
+ return responses.get(0);
+ }
+
+ public final Collection<FullHttpResponse> put(InetSocketAddress remoteAddress, List<Tuple<String, CharSequence>> urisAndBodies)
+ throws InterruptedException {
+ return processRequestsWithBody(HttpMethod.PUT, remoteAddress, urisAndBodies);
+ }
+
+ private List<FullHttpResponse> processRequestsWithBody(
+ HttpMethod method,
+ InetSocketAddress remoteAddress,
+ List<Tuple<String, CharSequence>> urisAndBodies
+ ) throws InterruptedException {
+ List<FullHttpRequest> requests = new ArrayList<>(urisAndBodies.size());
+ for (int i = 0; i < urisAndBodies.size(); ++i) {
+ final Tuple<String, CharSequence> uriAndBody = urisAndBodies.get(i);
+ ByteBuf content = Unpooled.copiedBuffer(uriAndBody.v2(), StandardCharsets.UTF_8);
+ FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, method, uriAndBody.v1(), content);
+ request.headers().add(HttpHeaderNames.HOST, "localhost");
+ request.headers().add(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes());
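+ // An explicit Content-Length (the byte length of the copied UTF-8 body) keeps these requests from being sent chunked.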
+ request.headers().add(HttpHeaderNames.CONTENT_TYPE, "application/json");
+ request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http");
+ request.headers().add("X-Opaque-ID", String.valueOf(i));
+ requests.add(request);
+ }
+ return sendRequests(remoteAddress, requests, false);
+ }
+
+ private List<FullHttpResponse> sendRequests(
+ final InetSocketAddress remoteAddress,
+ final Collection<FullHttpRequest> requests,
+ boolean ordered
+ ) {
+ final NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup(1);
+ try {
+ final HttpClient client = HttpClient.newConnection()
+ .resolver(DefaultAddressResolverGroup.INSTANCE)
+ .runOn(eventLoopGroup)
+ .host(remoteAddress.getHostString())
+ .port(remoteAddress.getPort())
+ .compress(compression);
+
+ @SuppressWarnings("unchecked")
+ final Mono<FullHttpResponse>[] monos = requests.stream()
+ .map(
+ request -> client.headers(h -> h.add(request.headers()))
+ .baseUrl(request.getUri())
+ .request(request.method())
+ .send(Mono.fromSupplier(() -> request.content()))
+ .responseSingle(
+ (r, body) -> body.switchIfEmpty(Mono.just(Unpooled.EMPTY_BUFFER))
+ .map(
+ b -> new DefaultFullHttpResponse(
+ r.version(),
+ r.status(),
+ b.retain(),
+ r.responseHeaders(),
+ EmptyHttpHeaders.INSTANCE
+ )
+ )
+ )
+ )
+ .toArray(Mono[]::new);
+
+ if (ordered == false) {
+ return ParallelFlux.from(monos).sequential().collectList().block();
+ } else {
+ return Flux.concat(monos).flatMapSequential(r -> Mono.just(r)).collectList().block();
+ }
+ } finally {
+ eventLoopGroup.shutdownGracefully().awaitUninterruptibly();
+ }
+ }
+
+ @Override
+ public void close() {
+
+ }
+}
diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4BadRequestTests.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4BadRequestTests.java
new file mode 100644
index 0000000000000..00ca378a4e46b
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4BadRequestTests.java
@@ -0,0 +1,122 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.OpenSearchException; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.reactor.SharedGroupFactory; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.util.ReferenceCounted; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class ReactorNetty4BadRequestTests extends OpenSearchTestCase { + + private NetworkService networkService; + private MockBigArrays bigArrays; + private ThreadPool threadPool; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + threadPool = new TestThreadPool("test"); + } + + @After + public void shutdown() throws Exception { + terminate(threadPool); + } + + public void testBadParameterEncoding() throws Exception { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + fail(); + } + + @Override + public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { + try { + final Exception e = cause instanceof Exception ? 
(Exception) cause : new OpenSearchException(cause); + channel.sendResponse(new BytesRestResponse(channel, RestStatus.BAD_REQUEST, e)); + } catch (final IOException e) { + throw new UncheckedIOException(e); + } + } + }; + + Settings settings = Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PORT.getKey(), getPortRange()).build(); + try ( + HttpServerTransport httpServerTransport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE + ) + ) { + httpServerTransport.start(); + final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); + + try (ReactorHttpClient nettyHttpClient = ReactorHttpClient.create()) { + final List responses = nettyHttpClient.get(transportAddress.address(), "/_cluster/settings?pretty=%"); + + try { + assertThat(responses, hasSize(1)); + final FullHttpResponse response = responses.get(0); + assertThat(response.status().code(), equalTo(400)); + final Collection responseBodies = ReactorHttpClient.returnHttpResponseBodies(responses); + assertThat(responseBodies, hasSize(1)); + final String body = responseBodies.iterator().next(); + assertThat(body, containsString("\"type\":\"bad_parameter_exception\"")); + assertThat( + body, + containsString("\"reason\":\"java.lang.IllegalArgumentException: partial escape sequence at end of string: %/\"") + ); + } finally { + responses.forEach(ReferenceCounted::release); + } + } + } + } + +} diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportTests.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportTests.java new file mode 100644 index 0000000000000..15a5b04c802a4 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportTests.java @@ -0,0 +1,579 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.network.NetworkAddress; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.http.BindHttpException; +import org.opensearch.http.CorsHandler; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.http.NullDispatcher; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.NettyAllocator; +import org.opensearch.transport.reactor.SharedGroupFactory; +import org.junit.After; +import org.junit.Before; + +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import io.netty.bootstrap.Bootstrap; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.PoolArenaMetric; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.buffer.PooledByteBufAllocatorMetric; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpContent; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpVersion; + +import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +/** + * Tests for the {@link ReactorNetty4HttpServerTransport} class. 
+ */ +public class ReactorNetty4HttpServerTransportTests extends OpenSearchTestCase { + + private NetworkService networkService; + private ThreadPool threadPool; + private MockBigArrays bigArrays; + private ClusterSettings clusterSettings; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + threadPool = new TestThreadPool("test"); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + } + + @After + public void shutdown() throws Exception { + if (threadPool != null) { + threadPool.shutdownNow(); + } + threadPool = null; + networkService = null; + bigArrays = null; + clusterSettings = null; + } + + /** + * Test that {@link ReactorNetty4HttpServerTransport} supports the "Expect: 100-continue" HTTP header + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectContinueHeader() throws InterruptedException { + final Settings settings = createSettings(); + final int contentLength = randomIntBetween(1, HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings).bytesAsInt()); + runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.CONTINUE); + } + + /** + * Test that {@link ReactorNetty4HttpServerTransport} responds to a + * 100-continue expectation with too large a content-length + * with a 413 status. + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectContinueHeaderContentLengthTooLong() throws InterruptedException { + final String key = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(); + final int maxContentLength = randomIntBetween(1, 104857600); + final Settings settings = createBuilderWithPort().put(key, maxContentLength + "b").build(); + final int contentLength = randomIntBetween(maxContentLength + 1, Integer.MAX_VALUE); + runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); + } + + /** + * Test that {@link ReactorNetty4HttpServerTransport} responds to an unsupported expectation with a 417 status. 
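+ * (Any {@code Expect} header value other than {@code 100-continue} cannot be honored, hence 417 Expectation Failed.)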
+ * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectUnsupportedExpectation() throws InterruptedException { + Settings settings = createSettings(); + runExpectHeaderTest(settings, "chocolate=yummy", 0, HttpResponseStatus.EXPECTATION_FAILED); + } + + private void runExpectHeaderTest( + final Settings settings, + final String expectation, + final int contentLength, + final HttpResponseStatus expectedStatus + ) throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray("done"))); + } + + @Override + public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + }; + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + clusterSettings, + new SharedGroupFactory(settings), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + try (ReactorHttpClient client = ReactorHttpClient.create()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + request.headers().set(HttpHeaderNames.EXPECT, expectation); + HttpUtil.setContentLength(request, contentLength); + + // Reactor Netty 4 does not expose 100 CONTINUE response but instead just asks for content + final HttpContent continuationRequest = new DefaultHttpContent(Unpooled.EMPTY_BUFFER); + final FullHttpResponse continuationResponse = client.send(remoteAddress.address(), request, continuationRequest); + try { + assertThat(continuationResponse.status(), is(HttpResponseStatus.OK)); + assertThat(new String(ByteBufUtil.getBytes(continuationResponse.content()), StandardCharsets.UTF_8), is("done")); + } finally { + continuationResponse.release(); + } + } + } + } + + public void testBindUnavailableAddress() { + Settings initialSettings = createSettings(); + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + initialSettings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + Settings settings = Settings.builder() + .put("http.port", remoteAddress.getPort()) + .put("network.host", remoteAddress.getAddress()) + .build(); + try ( + ReactorNetty4HttpServerTransport otherTransport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(settings), + NoopTracer.INSTANCE + ) + ) { + BindHttpException bindHttpException = expectThrows(BindHttpException.class, otherTransport::start); + assertEquals("Failed to bind to " + NetworkAddress.format(remoteAddress.address()), bindHttpException.getMessage()); + } + } + } + + public void 
testBadRequest() throws InterruptedException {
+ final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+ @Override
+ public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+ logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request));
+ throw new AssertionError();
+ }
+
+ @Override
+ public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+ logger.error("--> Unexpected bad request");
+ throw new AssertionError(cause);
+ }
+ };
+
+ final Settings settings;
+ final int maxInitialLineLength;
+ final Setting<ByteSizeValue> httpMaxInitialLineLengthSetting = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
+ if (randomBoolean()) {
+ maxInitialLineLength = httpMaxInitialLineLengthSetting.getDefault(Settings.EMPTY).bytesAsInt();
+ settings = createSettings();
+ } else {
+ maxInitialLineLength = randomIntBetween(1, 8192);
+ settings = createBuilderWithPort().put(httpMaxInitialLineLengthSetting.getKey(), maxInitialLineLength + "b").build();
+ }
+
+ try (
+ ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport(
+ settings,
+ networkService,
+ bigArrays,
+ threadPool,
+ xContentRegistry(),
+ dispatcher,
+ clusterSettings,
+ new SharedGroupFactory(settings),
+ NoopTracer.INSTANCE
+ )
+ ) {
+ transport.start();
+ final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+
+ try (ReactorHttpClient client = ReactorHttpClient.create()) {
+ final String url = "/" + randomAlphaOfLength(maxInitialLineLength);
+ final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);
+
+ final FullHttpResponse response = client.send(remoteAddress.address(), request);
+ try {
+ assertThat(response.status(), equalTo(HttpResponseStatus.REQUEST_URI_TOO_LONG));
+ assertThat(response.content().array().length, equalTo(0));
+ } finally {
+ response.release();
+ }
+ }
+ }
+ }
+
+ public void testDispatchFailed() throws InterruptedException {
+ final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+ @Override
+ public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+ throw new RuntimeException("Bad things happen");
+ }
+
+ @Override
+ public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+ logger.error("--> Unexpected bad request");
+ throw new AssertionError(cause);
+ }
+ };
+
+ final Settings settings = createSettings();
+ try (
+ ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport(
+ settings,
+ networkService,
+ bigArrays,
+ threadPool,
+ xContentRegistry(),
+ dispatcher,
+ clusterSettings,
+ new SharedGroupFactory(settings),
+ NoopTracer.INSTANCE
+ )
+ ) {
+ transport.start();
+ final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+
+ try (ReactorHttpClient client = ReactorHttpClient.create()) {
+ final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
+
+ final FullHttpResponse response = client.send(remoteAddress.address(), request);
+ try {
+ assertThat(response.status(), equalTo(HttpResponseStatus.INTERNAL_SERVER_ERROR));
+ assertThat(response.content().array().length, equalTo(0));
+ } finally {
+ response.release();
+ }
+ }
+ }
+ }
+
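Each of these tests re-declares an anonymous dispatcher inline. For reference, a minimal sketch of the same `HttpServerTransport.Dispatcher` shape, using only types this test file already imports (the echo behavior is illustrative and not part of the patch):

```java
// Minimal dispatcher of the shape used throughout these tests: echo the request URI on
// the happy path, and fail the test if the bad-request path is hit unexpectedly.
final HttpServerTransport.Dispatcher echoDispatcher = new HttpServerTransport.Dispatcher() {
    @Override
    public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) {
        // Respond 200 OK with the request URI as a plain-text body.
        channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray(request.uri())));
    }

    @Override
    public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) {
        // Tests that do not exercise the bad-request path treat reaching it as a failure.
        throw new AssertionError(cause);
    }
};
```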
public void testLargeCompressedResponse() throws InterruptedException {
+        final String responseString = randomAlphaOfLength(4 * 1024 * 1024);
+        final String url = "/thing/";
+        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+
+            @Override
+            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+                if (url.equals(request.uri())) {
+                    channel.sendResponse(new BytesRestResponse(OK, responseString));
+                } else {
+                    logger.error("--> Unexpected successful uri [{}]", request.uri());
+                    throw new AssertionError();
+                }
+            }
+
+            @Override
+            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+                logger.error(
+                    new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())),
+                    cause
+                );
+                throw new AssertionError();
+            }
+
+        };
+
+        try (
+            ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport(
+                Settings.EMPTY,
+                networkService,
+                bigArrays,
+                threadPool,
+                xContentRegistry(),
+                dispatcher,
+                clusterSettings,
+                new SharedGroupFactory(Settings.EMPTY),
+                NoopTracer.INSTANCE
+            )
+        ) {
+            transport.start();
+            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+
+            try (ReactorHttpClient client = ReactorHttpClient.create()) {
+                DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);
+                request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, randomFrom("deflate", "gzip"));
+                long numOfHugeAllocations = getHugeAllocationCount();
+                final FullHttpResponse response = client.send(remoteAddress.address(), request);
+                try {
+                    assertThat(getHugeAllocationCount(), equalTo(numOfHugeAllocations));
+                    assertThat(response.status(), equalTo(HttpResponseStatus.OK));
+                    byte[] bytes = new byte[response.content().readableBytes()];
+                    response.content().readBytes(bytes);
+                    assertThat(new String(bytes, StandardCharsets.UTF_8), equalTo(responseString));
+                } finally {
+                    response.release();
+                }
+            }
+        }
+    }
+
+    private long getHugeAllocationCount() {
+        long numOfHugeAllocations = 0;
+        ByteBufAllocator allocator = NettyAllocator.getAllocator();
+        assert allocator instanceof NettyAllocator.NoDirectBuffers;
+        ByteBufAllocator delegate = ((NettyAllocator.NoDirectBuffers) allocator).getDelegate();
+        if (delegate instanceof PooledByteBufAllocator) {
+            PooledByteBufAllocatorMetric metric = ((PooledByteBufAllocator) delegate).metric();
+            numOfHugeAllocations = metric.heapArenas().stream().mapToLong(PoolArenaMetric::numHugeAllocations).sum();
+        }
+        return numOfHugeAllocations;
+    }
+
+    public void testCorsRequest() throws InterruptedException {
+        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+
+            @Override
+            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+                logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request));
+                throw new AssertionError();
+            }
+
+            @Override
+            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+                logger.error(
+                    new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())),
+                    cause
+                );
+                throw new AssertionError();
+            }
+
+        };
+
+        final Settings settings = createBuilderWithPort().put(SETTING_CORS_ENABLED.getKey(), true)
+            .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "test-cors.org")
+            .build();
+
+        try (
+            ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport(
+                settings,
+                networkService,
+                bigArrays,
+                threadPool,
+                xContentRegistry(),
+                dispatcher,
+                new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
+                new SharedGroupFactory(settings),
+                NoopTracer.INSTANCE
+            )
+        ) {
+            transport.start();
+            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+
+            // Test pre-flight request
+            try (ReactorHttpClient client = ReactorHttpClient.create()) {
+                final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/");
+                request.headers().add(CorsHandler.ORIGIN, "test-cors.org");
+                request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST");
+
+                final FullHttpResponse response = client.send(remoteAddress.address(), request);
+                try {
+                    assertThat(response.status(), equalTo(HttpResponseStatus.OK));
+                    assertThat(response.headers().get(CorsHandler.ACCESS_CONTROL_ALLOW_ORIGIN), equalTo("test-cors.org"));
+                    assertThat(response.headers().get(CorsHandler.VARY), equalTo(CorsHandler.ORIGIN));
+                    assertTrue(response.headers().contains(CorsHandler.DATE));
+                } finally {
+                    response.release();
+                }
+            }
+
+            // Test short-circuited request
+            try (ReactorHttpClient client = ReactorHttpClient.create()) {
+                final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
+                request.headers().add(CorsHandler.ORIGIN, "google.com");
+
+                final FullHttpResponse response = client.send(remoteAddress.address(), request);
+                try {
+                    assertThat(response.status(), equalTo(HttpResponseStatus.FORBIDDEN));
+                } finally {
+                    response.release();
+                }
+            }
+        }
+    }
+
+    public void testConnectTimeout() throws Exception {
+        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+
+            @Override
+            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+                logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request));
+                throw new AssertionError("Should not have received a dispatched request");
+            }
+
+            @Override
+            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+                logger.error(
+                    new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())),
+                    cause
+                );
+                throw new AssertionError("Should not have received a dispatched request");
+            }
+
+        };
+
+        Settings settings = createBuilderWithPort().put(
+            HttpTransportSettings.SETTING_HTTP_CONNECT_TIMEOUT.getKey(),
+            new TimeValue(randomIntBetween(100, 300))
+        ).build();
+
+        NioEventLoopGroup group = new NioEventLoopGroup();
+        try (
+            ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport(
+                settings,
+                networkService,
+                bigArrays,
+                threadPool,
+                xContentRegistry(),
+                dispatcher,
+                new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
+                new SharedGroupFactory(settings),
+                NoopTracer.INSTANCE
+            )
+        ) {
+            transport.start();
+            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+
+            final CountDownLatch channelClosedLatch = new CountDownLatch(1);
+
+            final Bootstrap clientBootstrap = new Bootstrap().option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator())
+                .channel(NioSocketChannel.class)
+                .handler(new ChannelInitializer<SocketChannel>() {
+
+                    @Override
+                    protected void initChannel(SocketChannel ch) {
+                        ch.pipeline().addLast(new ChannelHandlerAdapter() {
+                        });
+
+                    }
+                })
+                .group(group);
+            ChannelFuture connect = clientBootstrap.connect(remoteAddress.address());
+            connect.channel().closeFuture().addListener(future -> channelClosedLatch.countDown());
+
+            assertTrue("Channel should be closed due to read timeout", channelClosedLatch.await(1, TimeUnit.MINUTES));
+
+        } finally {
+            group.shutdownGracefully().await();
+        }
+    }
+
+    private Settings createSettings() {
+        return createBuilderWithPort().build();
+    }
+
+    private Settings.Builder createBuilderWithPort() {
+        return Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PORT.getKey(), getPortRange());
+    }
+}
diff --git a/server/build.gradle b/server/build.gradle
index c56f9d5aa288f..fa8a44ef6fc94 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -154,6 +154,10 @@ dependencies {
   // jcraft
   api "com.jcraft:jzlib:${versions.jzlib}"
 
+  // reactor
+  api "io.projectreactor:reactor-core:${versions.reactor}"
+  api "org.reactivestreams:reactive-streams:${versions.reactivestreams}"
+
   // protobuf
   api "com.google.protobuf:protobuf-java:${versions.protobuf}"
   api "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}"
@@ -366,11 +370,13 @@ tasks.named("thirdPartyAudit").configure {
     'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor',
     'com.google.protobuf.UnsafeUtil$MemoryAccessor',
     'org.apache.logging.log4j.core.util.internal.UnsafeUtil',
-    'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1'
+    'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1',
+    'reactor.core.publisher.Traces$SharedSecretsCallSiteSupplierFactory$TracingException'
   )
 }
 
 tasks.named("dependencyLicenses").configure {
+  mapping from: /reactor-.*/, to: 'reactor'
   mapping from: /lucene-.*/, to: 'lucene'
   dependencies = project.configurations.runtimeClasspath.fileCollection {
     it.group.startsWith('org.opensearch') == false ||
diff --git a/plugins/crypto-kms/licenses/reactive-streams-1.0.4.jar.sha1 b/server/licenses/reactive-streams-1.0.4.jar.sha1
similarity index 100%
rename from plugins/crypto-kms/licenses/reactive-streams-1.0.4.jar.sha1
rename to server/licenses/reactive-streams-1.0.4.jar.sha1
diff --git a/plugins/crypto-kms/licenses/reactive-streams-LICENSE.txt b/server/licenses/reactive-streams-LICENSE.txt
similarity index 100%
rename from plugins/crypto-kms/licenses/reactive-streams-LICENSE.txt
rename to server/licenses/reactive-streams-LICENSE.txt
diff --git a/plugins/discovery-ec2/licenses/reactive-streams-NOTICE.txt b/server/licenses/reactive-streams-NOTICE.txt
similarity index 100%
rename from plugins/discovery-ec2/licenses/reactive-streams-NOTICE.txt
rename to server/licenses/reactive-streams-NOTICE.txt
diff --git a/server/licenses/reactor-LICENSE.txt b/server/licenses/reactor-LICENSE.txt
new file mode 100644
index 0000000000000..e5583c184e67a
--- /dev/null
+++ b/server/licenses/reactor-LICENSE.txt
@@ -0,0 +1,201 @@
+Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
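The testConnectTimeout case earlier in this patch drives the new http.connect_timeout setting that the remaining hunks below register in HttpTransportSettings and ClusterSettings. As a rough sketch of how such a node-scoped time setting is declared and read back — the Setting.timeSetting call shape, the key name, and the zero default/minimum are taken from the diff below, while the ConnectTimeoutSketch holder class and the connectTimeoutMillis helper are illustrative assumptions, not code from this patch:

    import org.opensearch.common.settings.Setting;
    import org.opensearch.common.settings.Setting.Property;
    import org.opensearch.common.settings.Settings;
    import org.opensearch.common.unit.TimeValue;

    // Hypothetical holder class; only the Setting/TimeValue APIs visible in this patch are assumed.
    final class ConnectTimeoutSketch {
        // Mirrors the declaration added below: a default of 0 means no connect timeout.
        static final Setting<TimeValue> CONNECT_TIMEOUT = Setting.timeSetting(
            "http.connect_timeout",
            new TimeValue(0),   // default: disabled
            new TimeValue(0),   // minimum allowed value
            Property.NodeScope
        );

        // Reads the configured timeout from node settings, e.g. for a server bootstrap option.
        static long connectTimeoutMillis(Settings nodeSettings) {
            return CONNECT_TIMEOUT.get(nodeSettings).millis();
        }
    }

In the test above the same value is supplied through createBuilderWithPort().put(HttpTransportSettings.SETTING_HTTP_CONNECT_TIMEOUT.getKey(), new TimeValue(...)), so a client that connects but never completes a request should see its channel closed once the timeout elapses.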
diff --git a/plugins/repository-azure/licenses/reactive-streams-NOTICE.txt b/server/licenses/reactor-NOTICE.txt
similarity index 100%
rename from plugins/repository-azure/licenses/reactive-streams-NOTICE.txt
rename to server/licenses/reactor-NOTICE.txt
diff --git a/server/licenses/reactor-core-3.5.11.jar.sha1 b/server/licenses/reactor-core-3.5.11.jar.sha1
new file mode 100644
index 0000000000000..e5ffdbc8a7840
--- /dev/null
+++ b/server/licenses/reactor-core-3.5.11.jar.sha1
@@ -0,0 +1 @@
+db2299757f562261eb775d13658e86ff06f91e8a
\ No newline at end of file
diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
index 5ab1f49949679..2bb81064c9c71 100644
--- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
@@ -348,6 +348,7 @@ public void apply(Settings value, Settings current, Settings previous) {
             HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE,
             HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH,
             HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT,
+            HttpTransportSettings.SETTING_HTTP_CONNECT_TIMEOUT,
             HttpTransportSettings.SETTING_HTTP_RESET_COOKIES,
             HttpTransportSettings.OLD_SETTING_HTTP_TCP_NO_DELAY,
             HttpTransportSettings.SETTING_HTTP_TCP_NO_DELAY,
diff --git a/server/src/main/java/org/opensearch/http/HttpTransportSettings.java b/server/src/main/java/org/opensearch/http/HttpTransportSettings.java
index f16f06f414e28..621ef36692178 100644
--- a/server/src/main/java/org/opensearch/http/HttpTransportSettings.java
+++ b/server/src/main/java/org/opensearch/http/HttpTransportSettings.java
@@ -182,6 +182,14 @@ public final class HttpTransportSettings {
         Property.NodeScope
     );
 
+    // A default of 0 means that by default there is no connect timeout
+    public static final Setting<TimeValue> SETTING_HTTP_CONNECT_TIMEOUT = Setting.timeSetting(
+        "http.connect_timeout",
+        new TimeValue(0),
+        new TimeValue(0),
+        Property.NodeScope
+    );
+
     // Tcp socket settings
 
     public static final Setting<Boolean> OLD_SETTING_HTTP_TCP_NO_DELAY = boolSetting(
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java
index 8490ee4fc39bc..b5ff30deecf5c 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java
@@ -175,6 +175,8 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
+import reactor.core.scheduler.Schedulers;
+
 import static java.util.Collections.emptyMap;
 import static org.opensearch.core.common.util.CollectionUtils.arrayAsArrayList;
 import static org.hamcrest.Matchers.empty;
@@ -225,6 +227,7 @@ public static void resetPortCounter() {
 
     @Override
     public void tearDown() throws Exception {
+        Schedulers.shutdownNow();
         FeatureFlagSetter.clear();
         super.tearDown();
     }

From c40014f8d37f27811362a3d6b3296cdc032b4399 Mon Sep 17 00:00:00 2001
From: Navneet Verma
Date: Mon, 6 Nov 2023 13:55:27 -0800
Subject: [PATCH 127/178] Adding version condition while adding geoshape doc
 values to the index, to ensure backward compatibility. (#11095)

Signed-off-by: Navneet Verma
---
 CHANGELOG.md                                  |  1 +
 ...AbstractGeoBucketAggregationIntegTest.java |  6 +-
 .../test/mixed_cluster/30_geoshape.yml        | 16 +++++
 .../test/old_cluster/30_geoshape.yml          | 28 +++++++++
 .../test/upgraded_cluster/30_geoshape.yml     | 61 +++++++++++++++++++
 .../index/mapper/GeoShapeFieldMapper.java     | 25 +++++++-
 6 files changed, 131 insertions(+), 6 deletions(-)
 create mode 100644 qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_geoshape.yml
 create mode 100644 qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_geoshape.yml
 create mode 100644 qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_geoshape.yml

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a71d2ff537834..435683442d4c9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -139,6 +139,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370))
 - Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496))
 - Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor ([#10737](https://github.com/opensearch-project/OpenSearch/pull/10737))
+- Adding version condition while adding geoshape doc values to the index, to ensure backward compatibility.([#11095](https://github.com/opensearch-project/OpenSearch/pull/11095))
 
 ### Security

diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java
index 86d8ad2968e7f..7316847ac6046 100644
--- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java
+++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java
@@ -87,7 +87,7 @@ protected boolean forbidPrivateIndexSettings() {
      */
     protected void prepareGeoShapeIndexForAggregations(final Random random) throws Exception {
         expectedDocsCountForGeoShapes = new HashMap<>();
-        final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build();
+        final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build();
         final List<IndexRequestBuilder> geoshapes = new ArrayList<>();
         assertAcked(prepareCreate(GEO_SHAPE_INDEX_NAME).setSettings(settings).setMapping(GEO_SHAPE_FIELD_NAME, "type" + "=geo_shape"));
         boolean isShapeIntersectingBB = false;
@@ -136,7 +136,7 @@ protected void prepareSingleValueGeoPointIndex(final Random random) throws Excep
         expectedDocCountsForSingleGeoPoint = new HashMap<>();
         createIndex("idx_unmapped");
         final Settings settings = Settings.builder()
-            .put(IndexMetadata.SETTING_VERSION_CREATED, version)
+            .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
             .put("index.number_of_shards", 4)
             .put("index.number_of_replicas", 0)
             .build();
@@ -160,7 +160,7 @@ protected void prepareSingleValueGeoPointIndex(final Random random) throws Excep
     protected void prepareMultiValuedGeoPointIndex(final Random random) throws Exception {
         multiValuedExpectedDocCountsGeoPoint = new HashMap<>();
-        final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build();
+        final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build();
         final List<IndexRequestBuilder> cities = new ArrayList<>();
         assertAcked(
             prepareCreate("multi_valued_idx").setSettings(settings)
diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_geoshape.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_geoshape.yml
new file mode 100644
index 0000000000000..e669016cad98a
--- /dev/null
+++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_geoshape.yml
@@ -0,0 +1,16 @@
+---
+"Insert Document with geoshape field":
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - '{"index": {"_index": "geo_shape_index_old", "_id":191}}'
+          - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}'
+          - '{"index": {"_index": "geo_shape_index_old", "_id":219}}'
+          - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}'
+
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: geo_shape_index_old
+  - match: { hits.total: 2 }
diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_geoshape.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_geoshape.yml
new file mode 100644
index 0000000000000..30a39447905c0
--- /dev/null
+++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_geoshape.yml
@@ -0,0 +1,28 @@
+---
+"Create index with Geoshape field":
+  - do:
+      indices.create:
+        index: geo_shape_index_old
+        body:
+          settings:
+            index:
+              number_of_replicas: 2
+          mappings:
+            "properties":
+              "location":
+                "type": "geo_shape"
+
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - '{"index": {"_index": "geo_shape_index_old", "_id":191}}'
+          - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}'
+          - '{"index": {"_index": "geo_shape_index_old", "_id":219}}'
+          - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}'
+
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: geo_shape_index_old
+  - match: { hits.total: 2 }
diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_geoshape.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_geoshape.yml
new file mode 100644
index 0000000000000..4c7b12a7f1909
--- /dev/null
+++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_geoshape.yml
@@ -0,0 +1,61 @@
+---
+"Validate we are able to index documents after upgrade":
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - '{"index": {"_index": "geo_shape_index_old", "_id":191}}'
+          - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}'
+          - '{"index": {"_index": "geo_shape_index_old", "_id":219}}'
+          - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}'
+
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: geo_shape_index_old
+  - match: { hits.total: 2 }
+
+
+---
+"Create index with Geoshape field in new cluster":
+  - do:
+      indices.create:
+        index: geo_shape_index_new
+        body:
+          settings:
+            index:
+              number_of_replicas: 2
+          mappings:
+            "properties":
+              "location":
+                "type": "geo_shape"
+
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - '{"index": {"_index": "geo_shape_index_new", "_id":191}}'
+          - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}'
+          - '{"index": {"_index": "geo_shape_index_new", "_id":219}}'
+          - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}'
+
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: geo_shape_index_new
+  - match: { hits.total: 2 }
+
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: geo_shape_index_new
+        body:
+          aggregations:
+            myaggregation:
+              geo_bounds:
+                field: "location"
+  - match: { hits.total: 2 }
+  - match: { aggregations.myaggregation.bounds.top_left.lat: 0.9999999823048711 }
+  - match: { aggregations.myaggregation.bounds.top_left.lon: 99.99999999068677 }
+  - match: { aggregations.myaggregation.bounds.bottom_right.lat: 0.0 }
+  - match: { aggregations.myaggregation.bounds.bottom_right.lon: 105.99999996833503 }
diff --git a/server/src/main/java/org/opensearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/GeoShapeFieldMapper.java
index 4a4b2684b5f4c..b44b4b75549c3 100644
--- a/server/src/main/java/org/opensearch/index/mapper/GeoShapeFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/GeoShapeFieldMapper.java
@@ -31,12 +31,15 @@
 package org.opensearch.index.mapper;
 
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.LatLonShape;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.search.Query;
+import org.opensearch.Version;
 import org.opensearch.common.Explicit;
 import org.opensearch.common.geo.GeometryParser;
 import org.opensearch.common.geo.ShapeRelation;
@@ -77,6 +80,7 @@
  * @opensearch.internal
  */
 public class GeoShapeFieldMapper extends AbstractShapeGeometryFieldMapper<Geometry> {
+    private static final Logger logger = LogManager.getLogger(GeoShapeFieldMapper.class);
     public static final String CONTENT_TYPE = "geo_shape";
     public static final FieldType FIELD_TYPE = new FieldType();
     static {
@@ -205,9 +209,24 @@ protected void addDocValuesFields(
         final List<IndexableField> indexableFields,
         final ParseContext context
     ) {
-        Field[] fieldsArray = new Field[indexableFields.size()];
-        fieldsArray = indexableFields.toArray(fieldsArray);
-        context.doc().add(LatLonShape.createDocValueField(name, fieldsArray));
+        /*
+         * We are adding the doc values for GeoShape only if the index is created with 2.9 and above version of
+         * OpenSearch. If we don't do that after the upgrade of OpenSearch customers are not able to index documents
+         * with GeoShape fields. Github issue: https://github.com/opensearch-project/OpenSearch/issues/10958,
+         * https://github.com/opensearch-project/OpenSearch/issues/10795
+         */
+        if (context.indexSettings().getIndexVersionCreated().onOrAfter(Version.V_2_9_0)) {
+            Field[] fieldsArray = new Field[indexableFields.size()];
+            fieldsArray = indexableFields.toArray(fieldsArray);
+            context.doc().add(LatLonShape.createDocValueField(name, fieldsArray));
+        } else {
+            logger.warn(
+                "The index was created with Version : {}, for geoshape doc values to work index must be "
+                    + "created with OpenSearch Version : {} or above",
+                context.indexSettings().getIndexVersionCreated(),
+                Version.V_2_9_0
+            );
+        }
     }
 
     @Override

From 3bbc4952941c7a62f04b8aca3c973bf35677adb6 Mon Sep 17 00:00:00 2001
From: Marc Handalian
Date: Mon, 6 Nov 2023 14:52:50 -0800
Subject: [PATCH 128/178] Fix flaky test
 RemoteIndexShardtests.testRepicaCleansUpOldCommitsWhenReceivingNew (#11111)

Signed-off-by: Marc Handalian
---
 .../index/shard/RemoteIndexShardTests.java | 20 +++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java
index 20cec90d79e3e..dd92bfb47afdb 100644
--- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java
+++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java
@@ -32,6 +32,7 @@
 import org.opensearch.indices.replication.common.ReplicationFailedException;
 import org.opensearch.indices.replication.common.ReplicationType;
 import org.opensearch.test.CorruptionUtils;
+import org.opensearch.test.junit.annotations.TestLogging;
 import org.hamcrest.MatcherAssert;
 import org.junit.Assert;
 
@@ -297,31 +298,39 @@ public void testPrimaryRestart_PrimaryHasExtraCommits() throws Exception {
         }
     }
 
+    @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE")
     public void testRepicaCleansUpOldCommitsWhenReceivingNew() throws Exception {
         final Path remotePath = createTempDir();
         try (ReplicationGroup shards = createGroup(1, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory(), remotePath)) {
             shards.startAll();
             final IndexShard primary = shards.getPrimary();
             final IndexShard replica = shards.getReplicas().get(0);
+            final Store store = replica.store();
+            final SegmentInfos initialCommit = store.readLastCommittedSegmentsInfo();
             shards.indexDocs(1);
             flushShard(primary);
             replicateSegments(primary, shards.getReplicas());
+            assertDocCount(primary, 1);
             assertDocCount(replica, 1);
-            assertEquals("segments_5", replica.store().readLastCommittedSegmentsInfo().getSegmentsFileName());
-            assertSingleSegmentFile(replica, "segments_5");
+            assertSingleSegmentFile(replica);
+            final SegmentInfos secondCommit = store.readLastCommittedSegmentsInfo();
+            assertTrue(secondCommit.getGeneration() > initialCommit.getGeneration());
             shards.indexDocs(1);
             primary.refresh("test");
             replicateSegments(primary, shards.getReplicas());
             assertDocCount(replica, 2);
-            assertSingleSegmentFile(replica, "segments_5");
+            assertSingleSegmentFile(replica);
+            assertEquals(store.readLastCommittedSegmentsInfo().getGeneration(), secondCommit.getGeneration());
             shards.indexDocs(1);
             flushShard(primary);
             replicateSegments(primary, shards.getReplicas());
             assertDocCount(replica, 3);
-            assertSingleSegmentFile(replica, "segments_6");
+            assertSingleSegmentFile(replica);
+            final SegmentInfos thirdCommit = store.readLastCommittedSegmentsInfo();
+            assertTrue(thirdCommit.getGeneration() > secondCommit.getGeneration());
 
             final Store.RecoveryDiff diff = Store.segmentReplicationDiff(primary.getSegmentMetadataMap(), replica.getSegmentMetadataMap());
             assertTrue(diff.missing.isEmpty());
@@ -571,11 +580,10 @@ protected void validateShardIdleWithNoReplicas(IndexShard primary) {
         assertFalse(primary.hasRefreshPending());
     }
 
-    private void assertSingleSegmentFile(IndexShard shard, String fileName) throws IOException {
+    private void assertSingleSegmentFile(IndexShard shard) throws IOException {
        final Set<String> segmentsFileNames = Arrays.stream(shard.store().directory().listAll())
            .filter(file -> file.startsWith(IndexFileNames.SEGMENTS))
            .collect(Collectors.toSet());
        assertEquals("Expected a single segment file", 1, segmentsFileNames.size());
-        assertEquals(segmentsFileNames.stream().findFirst().get(), fileName);
     }
 }

From c206159b4e136ac9ddd96c0ec5eeb74465092088 Mon Sep 17 00:00:00 2001
From: Ticheng Lin <51488860+ticheng-aws@users.noreply.github.com>
Date: Mon, 6 Nov 2023 22:46:21 -0800
Subject: [PATCH 129/178] Fix flaky testMinScoreDisablesCountOptimization in
 query profile phase tests (#11104) (#11104)

Signed-off-by: Ticheng Lin
---
 .../search/query/QueryProfilePhaseTests.java | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java
index 28af8a63cfba8..62dcf54e25578 100644
--- a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java
+++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java
@@ -336,12 +336,17 @@ public void testMinScoreDisablesCountOptimization() throws Exception {
         assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation);
         assertProfileData(context, "MatchAllDocsQuery", query -> {
             assertThat(query.getTimeBreakdown().keySet(), not(empty()));
-            assertThat(query.getTimeBreakdown().get("score"), greaterThanOrEqualTo(100L));
+            assertThat(query.getTimeBreakdown().get("score"), greaterThanOrEqualTo(1L));
             assertThat(query.getTimeBreakdown().get("score_count"), equalTo(1L));
             if (executor != null) {
-                assertThat(query.getTimeBreakdown().get("max_score"), greaterThanOrEqualTo(100L));
-                assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(100L));
-                assertThat(query.getTimeBreakdown().get("avg_score"), greaterThanOrEqualTo(100L));
+                long maxScore = query.getTimeBreakdown().get("max_score");
+                long minScore = query.getTimeBreakdown().get("min_score");
+                long avgScore = query.getTimeBreakdown().get("avg_score");
+                assertThat(maxScore, greaterThanOrEqualTo(1L));
+                assertThat(minScore, greaterThanOrEqualTo(1L));
+                assertThat(avgScore, greaterThanOrEqualTo(1L));
+                assertThat(maxScore, greaterThanOrEqualTo(avgScore));
+                assertThat(avgScore, greaterThanOrEqualTo(minScore));
                 assertThat(query.getTimeBreakdown().get("max_score_count"), equalTo(1L));
                 assertThat(query.getTimeBreakdown().get("min_score_count"), equalTo(1L));
                 assertThat(query.getTimeBreakdown().get("avg_score_count"), equalTo(1L));
             }

From 145fd9ecd72ed4318a6e7ed3f49453cef6b9ac8b Mon Sep 17 00:00:00 2001
From: Jay Deng
Date: Tue, 7 Nov 2023 04:40:20 -0800
Subject: [PATCH 130/178] Unmute FieldCapabilitiesIT.testWithIndexFilter for
 concurrent segment search (#11110)

Signed-off-by: Jay Deng
---
 .../org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java | 4 ----
 1 file
changed, 4 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java index 6b95405b3ebd4..f5d1b8234558e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -244,10 +244,6 @@ public void testWithIndexAlias() { } public void testWithIndexFilter() throws InterruptedException { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10433", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); assertAcked(prepareCreate("index-1").setMapping("timestamp", "type=date", "field1", "type=keyword")); assertAcked(prepareCreate("index-2").setMapping("timestamp", "type=date", "field1", "type=long")); From 0ba5d5807860a799c76f0fb6e063e8c5bd458492 Mon Sep 17 00:00:00 2001 From: Peter Nied Date: Tue, 7 Nov 2023 11:27:16 -0600 Subject: [PATCH 131/178] Fix SuggestSearch.testSkipDuplicates by forceing refresh when indexing its test documents (#11068) During the testSkipDuplicates its possible that not all documents were fully indexed by the time the search with suggestions was created, updating the indexing operations to refresh the index before returning. As its possible that did not fix the issue, I've added logging around the test case to capture the state when the error occurred that can assist in future troubleshooting. Signed-off-by: Peter Nied --- CHANGELOG.md | 1 + .../search/suggest/CompletionSuggestSearchIT.java | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 435683442d4c9..2011769858699 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -139,6 +139,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) - Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) - Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor ([#10737](https://github.com/opensearch-project/OpenSearch/pull/10737)) +- Fix SuggestSearch.testSkipDuplicates by forceing refresh when indexing its test documents ([#11068](https://github.com/opensearch-project/OpenSearch/pull/11068)) - Adding version condition while adding geoshape doc values to the index, to ensure backward compatibility.([#11095](https://github.com/opensearch-project/OpenSearch/pull/11095)) ### Security diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java index 30dba87f8ef5d..b342e6d35f0b4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java @@ -79,6 +79,7 @@ import java.util.Set; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.WAIT_UNTIL; import static 
org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -1171,6 +1172,7 @@ public void testSkipDuplicates() throws Exception { createIndexAndMapping(mapping); int numDocs = randomIntBetween(10, 100); int numUnique = randomIntBetween(1, numDocs); + logger.info("Suggestion duplicate parameters: numDocs {} numUnique {}", numDocs, numUnique); List indexRequestBuilders = new ArrayList<>(); int[] weights = new int[numUnique]; Integer[] termIds = new Integer[numUnique]; @@ -1180,8 +1182,10 @@ public void testSkipDuplicates() throws Exception { int weight = randomIntBetween(0, 100); weights[id] = Math.max(weight, weights[id]); String suggestion = "suggestion-" + String.format(Locale.ENGLISH, "%03d", id); + logger.info("Creating {}, id {}, weight {}", suggestion, i, id, weight); indexRequestBuilders.add( client().prepareIndex(INDEX) + .setRefreshPolicy(WAIT_UNTIL) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -1195,10 +1199,12 @@ public void testSkipDuplicates() throws Exception { indexRandom(true, indexRequestBuilders); Arrays.sort(termIds, Comparator.comparingInt(o -> weights[(int) o]).reversed().thenComparingInt(a -> (int) a)); + logger.info("Expected terms id ordered {}", (Object[]) termIds); String[] expected = new String[numUnique]; for (int i = 0; i < termIds.length; i++) { expected[i] = "suggestion-" + String.format(Locale.ENGLISH, "%03d", termIds[i]); } + logger.info("Expected suggestions field values {}", (Object[]) expected); CompletionSuggestionBuilder completionSuggestionBuilder = SuggestBuilders.completionSuggestion(FIELD) .prefix("sugg") .skipDuplicates(true) @@ -1207,6 +1213,7 @@ public void testSkipDuplicates() throws Exception { SearchResponse searchResponse = client().prepareSearch(INDEX) .suggest(new SuggestBuilder().addSuggestion("suggestions", completionSuggestionBuilder)) .get(); + logger.info("Search Response with Suggestions {}", searchResponse); assertSuggestions(searchResponse, true, "suggestions", expected); } From 4d8c228d34ccc3bc060112ce0c974981113f67ed Mon Sep 17 00:00:00 2001 From: Ankit Kala Date: Wed, 8 Nov 2023 14:36:54 +0530 Subject: [PATCH 132/178] Flaky RemoteClustersIT: Add assert busy to avoid race condition (#11057) Signed-off-by: Ankit Kala --- .../cluster/remote/test/RemoteClustersIT.java | 18 +++++++++++------- .../transport/ProxyConnectionStrategy.java | 2 +- .../transport/RemoteClusterService.java | 2 +- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/RemoteClustersIT.java b/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/RemoteClustersIT.java index dbea8db1a12fa..c38fcc468c673 100644 --- a/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/RemoteClustersIT.java +++ b/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/RemoteClustersIT.java @@ -42,11 +42,13 @@ import org.opensearch.client.cluster.RemoteInfoRequest; import org.opensearch.client.indices.CreateIndexRequest; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; import org.junit.After; import org.junit.Before; import java.io.IOException; +import java.util.concurrent.TimeUnit; public class RemoteClustersIT extends AbstractMultiClusterRemoteTestCase { @@ -112,7 +114,7 @@ 
public void testSniffModeConnectionFails() throws IOException { assertFalse(rci.isConnected()); } - public void testHAProxyModeConnectionWorks() throws IOException { + public void testHAProxyModeConnectionWorks() throws Exception { String proxyAddress = "haproxy:9600"; logger.info("Configuring remote cluster [{}]", proxyAddress); ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest().persistentSettings(Settings.builder() @@ -121,12 +123,14 @@ public void testHAProxyModeConnectionWorks() throws IOException { .build()); assertTrue(cluster1Client().cluster().putSettings(request, RequestOptions.DEFAULT).isAcknowledged()); - RemoteConnectionInfo rci = cluster1Client().cluster().remoteInfo(new RemoteInfoRequest(), RequestOptions.DEFAULT).getInfos().get(0); - logger.info("Connection info: {}", rci); - if (!rci.isConnected()) { - logger.info("Cluster health: {}", cluster1Client().cluster().health(new ClusterHealthRequest(), RequestOptions.DEFAULT)); - } - assertTrue(rci.isConnected()); + assertBusy(() -> { + RemoteConnectionInfo rci = cluster1Client().cluster().remoteInfo(new RemoteInfoRequest(), RequestOptions.DEFAULT).getInfos().get(0); + logger.info("Connection info: {}", rci); + if (!rci.isConnected()) { + logger.info("Cluster health: {}", cluster1Client().cluster().health(new ClusterHealthRequest(), RequestOptions.DEFAULT)); + } + assertTrue(rci.isConnected()); + }, 10, TimeUnit.SECONDS); assertEquals(2L, cluster1Client().search( new SearchRequest("haproxynosn:test2"), RequestOptions.DEFAULT).getHits().getTotalHits().value); diff --git a/server/src/main/java/org/opensearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/ProxyConnectionStrategy.java index e914542023ad9..b4477edaba687 100644 --- a/server/src/main/java/org/opensearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/opensearch/transport/ProxyConnectionStrategy.java @@ -309,7 +309,7 @@ public void onResponse(Void v) { @Override public void onFailure(Exception e) { - logger.debug( + logger.error( new ParameterizedMessage( "failed to open remote connection [remote cluster: {}, address: {}]", clusterAlias, diff --git a/server/src/main/java/org/opensearch/transport/RemoteClusterService.java b/server/src/main/java/org/opensearch/transport/RemoteClusterService.java index 35691cc5f8a1e..87786fb22f22e 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/opensearch/transport/RemoteClusterService.java @@ -270,7 +270,7 @@ protected void updateRemoteCluster(String clusterAlias, Settings settings) { // are on the cluster state thread and our custom future implementation will throw an // assertion. if (latch.await(10, TimeUnit.SECONDS) == false) { - logger.warn("failed to connect to new remote cluster {} within {}", clusterAlias, TimeValue.timeValueSeconds(10)); + logger.error("failed to connect to new remote cluster {} within {}", clusterAlias, TimeValue.timeValueSeconds(10)); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); From a782f4fb2b69f47cb180e29c3c9b4f2aea90415b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Vl=C4=8Dek?= Date: Wed, 8 Nov 2023 15:15:16 +0100 Subject: [PATCH 133/178] Deprecate CamelCase PathHierarchy tokenizer name (#10894) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Deprecate CamelCase PathHierarchy tokenizer name in favor to lowercase path_hierarchy. 
Signed-off-by: Lukáš Vlček --- CHANGELOG.md | 1 + .../common/CommonAnalysisModulePlugin.java | 25 +++++++++-- .../PathHierarchyTokenizerFactoryTests.java | 45 +++++++++++++++++++ .../test/analysis-common/30_tokenizers.yml | 9 +++- 4 files changed, 76 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2011769858699..772afcb0ea715 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,6 +63,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) - Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) - Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558)) +- Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) ### Deprecated diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index edb8c37c2dbdd..cf2736a8583d2 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -394,7 +394,17 @@ public Map> getTokenizers() { // TODO deprecate and remove in API tokenizers.put("lowercase", XLowerCaseTokenizerFactory::new); tokenizers.put("path_hierarchy", PathHierarchyTokenizerFactory::new); - tokenizers.put("PathHierarchy", PathHierarchyTokenizerFactory::new); + tokenizers.put("PathHierarchy", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { + // TODO Remove "PathHierarchy" tokenizer name in 4.0 and throw exception + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_3_0_0)) { + deprecationLogger.deprecate( + "PathHierarchy_tokenizer_deprecation", + "The [PathHierarchy] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [path_hierarchy] instead." + ); + } + return new PathHierarchyTokenizerFactory(indexSettings, environment, name, settings); + }); tokenizers.put("pattern", PatternTokenizerFactory::new); tokenizers.put("uax_url_email", UAX29URLEmailTokenizerFactory::new); tokenizers.put("whitespace", WhitespaceTokenizerFactory::new); @@ -662,8 +672,17 @@ public List getPreConfiguredTokenizers() { } return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); })); - tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new)); - + tokenizers.add(PreConfiguredTokenizer.openSearchVersion("PathHierarchy", (version) -> { + // TODO Remove "PathHierarchy" tokenizer name in 4.0 and throw exception + if (version.onOrAfter(Version.V_3_0_0)) { + deprecationLogger.deprecate( + "PathHierarchy_tokenizer_deprecation", + "The [PathHierarchy] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [path_hierarchy] instead." 
+ ); + } + return new PathHierarchyTokenizer(); + })); return tokenizers; } } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PathHierarchyTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PathHierarchyTokenizerFactoryTests.java index 1fe7c582449ec..555d6c78b6ec5 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PathHierarchyTokenizerFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PathHierarchyTokenizerFactoryTests.java @@ -35,16 +35,61 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.analysis.Tokenizer; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; +import org.opensearch.env.Environment; +import org.opensearch.env.TestEnvironment; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.analysis.IndexAnalyzers; +import org.opensearch.index.analysis.NamedAnalyzer; +import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTokenStreamTestCase; +import org.opensearch.test.VersionUtils; import java.io.IOException; import java.io.StringReader; +import java.util.Collections; public class PathHierarchyTokenizerFactoryTests extends OpenSearchTokenStreamTestCase { + private IndexAnalyzers buildAnalyzers(Version version, String tokenizer) throws IOException { + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, version) + .put("index.analysis.analyzer.my_analyzer.tokenizer", tokenizer) + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + return new AnalysisModule(TestEnvironment.newEnvironment(settings), Collections.singletonList(new CommonAnalysisModulePlugin())) + .getAnalysisRegistry() + .build(idxSettings); + } + + /** + * Test that deprecated "PathHierarchy" tokenizer name is still available via {@link CommonAnalysisModulePlugin} starting in 3.x. + */ + public void testPreConfiguredTokenizer() throws IOException { + + { + try ( + IndexAnalyzers indexAnalyzers = buildAnalyzers( + VersionUtils.randomVersionBetween(random(), Version.V_3_0_0, Version.CURRENT), + "PathHierarchy" + ) + ) { + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + assertTokenStreamContents(analyzer.tokenStream("dummy", "/a/b/c"), new String[] { "/a", "/a/b", "/a/b/c" }); + // Once LUCENE-12750 is fixed we can use the following testing method instead. + // Similar testing approach has been used for deprecation of (Edge)NGrams tokenizers as well. 
+ // assertAnalyzesTo(analyzer, "/a/b/c", new String[] { "/a", "/a/b", "/a/b/c" }); + + } + } + } + public void testDefaults() throws IOException { final Index index = new Index("test", "_na_"); final Settings indexSettings = newAnalysisSettingsBuilder().build(); diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml index 56ed2175df60a..179de835a4105 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml @@ -298,6 +298,9 @@ --- "path_hierarchy": + - skip: + features: "allowed_warnings" + - do: indices.analyze: body: @@ -312,6 +315,8 @@ - match: { detail.tokenizer.tokens.2.token: a/b/c } - do: + allowed_warnings: + - 'The [PathHierarchy] tokenizer name is deprecated and will be removed in a future version. Please change the tokenizer name to [path_hierarchy] instead.' indices.analyze: body: text: "a/b/c" @@ -337,11 +342,13 @@ - match: { detail.tokenizer.tokens.2.token: a/b/c } - do: + allowed_warnings: + - 'The [PathHierarchy] tokenizer name is deprecated and will be removed in a future version. Please change the tokenizer name to [path_hierarchy] instead.' indices.analyze: body: text: "a/b/c" explain: true - tokenizer: PathHierarchy + tokenizer: PathHierarchy - length: { detail.tokenizer.tokens: 3 } - match: { detail.tokenizer.name: PathHierarchy } - match: { detail.tokenizer.tokens.0.token: a } From ff3a6a3af549ef8948c99b95500b4bf4f8be76f0 Mon Sep 17 00:00:00 2001 From: Ethan Emoto <70109490+e-emoto@users.noreply.github.com> Date: Wed, 8 Nov 2023 07:53:41 -0800 Subject: [PATCH 134/178] Fixed test cases for concurrent search (#11119) Fixed test cases Updated fix for TransportTwoNodesSearchIT test cases Updated test cases and muted flaky tests Move indexRandomForConcurrentSearch call into prepareData Remove changes from TransportTwoNodesSearchIT.java until future fix Signed-off-by: Ethan Emoto --- .../search/aggregations/pipeline/DerivativeIT.java | 1 + .../search/aggregations/pipeline/MovAvgIT.java | 1 + .../search/basic/SearchRedStateIndexIT.java | 1 + .../search/basic/SearchWhileCreatingIndexIT.java | 1 + .../search/fetch/FetchSubPhasePluginIT.java | 5 +++++ .../functionscore/RandomScoreFunctionIT.java | 3 +++ .../org/opensearch/search/geo/GeoFilterIT.java | 2 ++ .../search/morelikethis/MoreLikeThisIT.java | 14 +++++++++++++- 8 files changed, 27 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java index 18484c8a60ed7..41bbffc13658b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java @@ -693,6 +693,7 @@ public void testAvgMovavgDerivNPE() throws Exception { } refresh(); + indexRandomForConcurrentSearch("movavg_npe"); SearchResponse response = client().prepareSearch("movavg_npe") .addAggregation( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java index 
2aad0d2d38901..8ad3107ac33ac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java @@ -1320,6 +1320,7 @@ public void testPredictWithNonEmptyBuckets() throws Exception { .setSource(jsonBuilder().startObject().field(INTERVAL_FIELD, i).field(VALUE_FIELD2, 10).endObject()) ); } + indexRandomForConcurrentSearch("predict_non_empty"); bulkBuilder.get(); ensureSearchable(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java index 44c4981dfdb36..bd623ccdf2731 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java @@ -153,6 +153,7 @@ private void buildRedIndex(int numShards) throws Exception { client().prepareIndex("test").setId("" + i).setSource("field1", "value1").get(); } refresh(); + indexRandomForConcurrentSearch("test"); internalCluster().stopRandomDataNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java index 71af7215c4eb7..a5989b693d332 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java @@ -106,6 +106,7 @@ private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas) } client().prepareIndex("test").setId(id).setSource("field", "test").get(); RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").get(); + indexRandomForConcurrentSearch("test"); // at least one shard should be successful when refreshing assertThat(refreshResponse.getSuccessfulShards(), greaterThanOrEqualTo(1)); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java index 86df25c4dad65..77ddc5bf636f0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java @@ -100,6 +100,10 @@ protected Collection> nodePlugins() { @SuppressWarnings("unchecked") public void testPlugin() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11112", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); client().admin() .indices() .prepareCreate("test") @@ -119,6 +123,7 @@ public void testPlugin() throws Exception { .actionGet(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setSource(new SearchSourceBuilder().ext(Collections.singletonList(new TermVectorsFetchBuilder("test")))) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java index 8f43cefd2d53b..54cfcb7e6a683 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java @@ -207,6 +207,7 @@ public void testScoreAccessWithinScript() throws Exception { .get(); } refresh(); + indexRandomForConcurrentSearch("test"); Map params = new HashMap<>(); params.put("factor", randomIntBetween(2, 4)); @@ -368,6 +369,7 @@ public void testScoreRange() throws Exception { } flush(); refresh(); + indexRandomForConcurrentSearch("test"); int iters = scaledRandomIntBetween(10, 20); for (int i = 0; i < iters; ++i) { SearchResponse searchResponse = client().prepareSearch() @@ -390,6 +392,7 @@ public void testSeeds() throws Exception { index("test", "type", "" + i, jsonBuilder().startObject().endObject()); } flushAndRefresh(); + indexRandomForConcurrentSearch("test"); assertNoFailures( client().prepareSearch() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java index 00524c6e04707..ba519be04edff 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java @@ -266,6 +266,7 @@ public void testShapeRelations() throws Exception { client().prepareIndex("shapes").setId("1").setSource(data, MediaTypeRegistry.JSON).get(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("shapes"); // Point in polygon SearchResponse result = client().prepareSearch() @@ -427,6 +428,7 @@ public void testBulk() throws Exception { client().admin().indices().prepareCreate("countries").setSettings(settings).setMapping(xContentBuilder).get(); BulkResponse bulk = client().prepareBulk().add(bulkAction, 0, bulkAction.length, null, xContentBuilder.contentType()).get(); + indexRandomForConcurrentSearch("countries"); for (BulkItemResponse item : bulk.getItems()) { assertFalse("unable to index data", item.isFailed()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java index 87435bb0bd09d..dc7c4e687c2fa 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java @@ -125,6 +125,7 @@ public void testSimpleMoreLikeThis() throws Exception { client().index(indexRequest("test").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject())) .actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); logger.info("Running moreLikeThis"); SearchResponse response = client().prepareSearch() @@ -155,6 +156,7 @@ public void testSimpleMoreLikeThisWithTypes() throws Exception { client().index(indexRequest("test").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject())) .actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); logger.info("Running moreLikeThis"); SearchResponse response = client().prepareSearch() @@ -190,6 +192,7 @@ public void testMoreLikeThisForZeroTokensInOneOfTheAnalyzedFields() throws Excep ).actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); + 
indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery( @@ -256,6 +259,7 @@ public void testMoreLikeThisWithAliases() throws Exception { client().index(indexRequest("test").id("4").source(jsonBuilder().startObject().field("text", "opensearch release").endObject())) .actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); logger.info("Running moreLikeThis on index"); SearchResponse response = client().prepareSearch() @@ -304,6 +308,7 @@ public void testMoreLikeThisWithAliasesInLikeDocuments() throws Exception { client().index(indexRequest(indexName).id("3").source(jsonBuilder().startObject().field("text", "opensearch index").endObject())) .actionGet(); refresh(indexName); + indexRandomForConcurrentSearch(indexName); SearchResponse response = client().prepareSearch() .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item(aliasName, "1") }).minTermFreq(1).minDocFreq(1)) @@ -321,6 +326,7 @@ public void testMoreLikeThisIssue2197() throws Exception { .get(); client().admin().indices().prepareRefresh("foo").get(); assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN)); + indexRandomForConcurrentSearch("foo"); SearchResponse response = client().prepareSearch() .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "1") })) @@ -344,6 +350,7 @@ public void testMoreLikeWithCustomRouting() throws Exception { .setRouting("2") .get(); client().admin().indices().prepareRefresh("foo").get(); + indexRandomForConcurrentSearch("foo"); SearchResponse response = client().prepareSearch() .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "1").routing("2") })) @@ -368,6 +375,7 @@ public void testMoreLikeThisIssueRoutingNotSerialized() throws Exception { .setRouting("4000") .get(); client().admin().indices().prepareRefresh("foo").get(); + indexRandomForConcurrentSearch("foo"); SearchResponse response = client().prepareSearch() .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "1").routing("4000") })) .get(); @@ -401,6 +409,7 @@ public void testNumericField() throws Exception { .get(); refresh(); + indexRandomForConcurrentSearch("test"); // Implicit list of fields -> ignore numeric fields SearchResponse searchResponse = client().prepareSearch() @@ -508,6 +517,7 @@ public void testMoreLikeThisWithFieldAlias() throws Exception { index("test", "_doc", "1", "text", "lucene"); index("test", "_doc", "2", "text", "lucene release"); refresh(); + indexRandomForConcurrentSearch("test"); Item item = new Item("test", "1"); QueryBuilder query = QueryBuilders.moreLikeThisQuery(new String[] { "alias" }, null, new Item[] { item }) @@ -548,6 +558,7 @@ public void testSimpleMoreLikeInclude() throws Exception { .source(jsonBuilder().startObject().field("text", "Lucene has been ported to other programming languages").endObject()) ).actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); logger.info("Running More Like This with include true"); SearchResponse response = client().prepareSearch() @@ -832,11 +843,12 @@ public void testSelectFields() throws IOException, ExecutionException, Interrupt assertHitCount(response, 1); } - public void testWithRouting() throws IOException { + public void testWithRouting() throws IOException, InterruptedException { client().prepareIndex("index").setId("1").setRouting("3").setSource("text", "this is a document").get(); 
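// Aside: every hunk in this patch applies the same pattern, sketched here with
// a hypothetical index name "demo" (illustrative only, not part of the change).
// The helper is the same test-base method the hunks above call; its extra
// bookkeeping gives concurrent segment search multiple slices to exercise
// without changing hit counts, which is why the surrounding assertions stay
// untouched.
client().prepareIndex("demo").setId("1").setSource("field", "value").get();
refresh("demo");                        // make the write visible to search
indexRandomForConcurrentSearch("demo"); // ensure segment slices exist for concurrent search
assertHitCount(client().prepareSearch("demo").setQuery(QueryBuilders.matchQuery("field", "value")).get(), 1);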
client().prepareIndex("index").setId("2").setRouting("1").setSource("text", "this is another document").get(); client().prepareIndex("index").setId("3").setRouting("4").setSource("text", "this is yet another document").get(); refresh("index"); + indexRandomForConcurrentSearch("index"); Item item = new Item("index", "2").routing("1"); MoreLikeThisQueryBuilder moreLikeThisQueryBuilder = new MoreLikeThisQueryBuilder( From 0a9dfec4670ca8cab2bd62d79d474acbfc957b6a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Nov 2023 14:52:29 -0500 Subject: [PATCH 135/178] Bump aws-actions/configure-aws-credentials from 2 to 4 (#10504) * Bump aws-actions/configure-aws-credentials from 2 to 4 Bumps [aws-actions/configure-aws-credentials](https://github.com/aws-actions/configure-aws-credentials) from 2 to 4. - [Release notes](https://github.com/aws-actions/configure-aws-credentials/releases) - [Changelog](https://github.com/aws-actions/configure-aws-credentials/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws-actions/configure-aws-credentials/compare/v2...v4) --- updated-dependencies: - dependency-name: aws-actions/configure-aws-credentials dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- .github/workflows/lucene-snapshots.yml | 2 +- .github/workflows/publish-maven-snapshots.yml | 2 +- CHANGELOG.md | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/lucene-snapshots.yml b/.github/workflows/lucene-snapshots.yml index 76981276fe085..ec5893ea546a9 100644 --- a/.github/workflows/lucene-snapshots.yml +++ b/.github/workflows/lucene-snapshots.yml @@ -50,7 +50,7 @@ jobs: run: ./gradlew publishJarsPublicationToMavenLocal -Pversion.suffix=snapshot-${{ steps.version.outputs.REVISION }} - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v2 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.LUCENE_SNAPSHOTS_ROLE }} aws-region: us-west-2 diff --git a/.github/workflows/publish-maven-snapshots.yml b/.github/workflows/publish-maven-snapshots.yml index 8c08df269a999..93bbfb8bbeab8 100644 --- a/.github/workflows/publish-maven-snapshots.yml +++ b/.github/workflows/publish-maven-snapshots.yml @@ -26,7 +26,7 @@ jobs: java-version: 17 - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v2 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.PUBLISH_SNAPSHOTS_ROLE }} aws-region: us-east-1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 772afcb0ea715..8ff0d4e3fd285 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -115,6 +115,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635)) - Bump `com.squareup.okio:okio` from 3.5.0 to 3.6.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637)) - Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.21.1 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000)) +- Bump `aws-actions/configure-aws-credentials` from 2 to 4 
([#10504](https://github.com/opensearch-project/OpenSearch/pull/10504)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) @@ -146,4 +147,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x \ No newline at end of file From 61a598bad7ce095219790e78196b1ae5b7428316 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Thu, 9 Nov 2023 12:59:13 -0500 Subject: [PATCH 136/178] [Refactor] Use iterative approach to evaluate Regex.simpleMatch (#11060) * Remove adjacent duplicates to optimize regex before processing Signed-off-by: Craig Perkins * Add tests Signed-off-by: Craig Perkins * Rename to removeAdjacentDuplicates Signed-off-by: Craig Perkins * Add additional test case Signed-off-by: Craig Perkins * Add CHANGELOG entry Signed-off-by: Craig Perkins * Throw IllegalArgumentException if input is invalid Signed-off-by: Craig Perkins * Add a space Signed-off-by: Craig Perkins * Change error message Signed-off-by: Craig Perkins * Use iterative solution with 2 pointers Signed-off-by: Craig Perkins * Remove unused method Signed-off-by: Craig Perkins * add tests and changelog Signed-off-by: Stephen Crawford * remove bad push Signed-off-by: Stephen Crawford * Update CHANGELOG.md Signed-off-by: Stephen Crawford <65832608+scrawfor99@users.noreply.github.com> * Update CHANGELOG.md Signed-off-by: Stephen Crawford <65832608+scrawfor99@users.noreply.github.com> * spotless Signed-off-by: Stephen Crawford * Update comment Signed-off-by: Craig Perkins * re-run CI Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins Signed-off-by: Craig Perkins Signed-off-by: Stephen Crawford Signed-off-by: Stephen Crawford <65832608+scrawfor99@users.noreply.github.com> Co-authored-by: Stephen Crawford Co-authored-by: Stephen Crawford <65832608+scrawfor99@users.noreply.github.com> --- CHANGELOG.md | 3 +- .../org/opensearch/common/regex/Regex.java | 54 +++++++++---------- .../opensearch/common/regex/RegexTests.java | 15 ++++++ 3 files changed, 44 insertions(+), 28 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8ff0d4e3fd285..e8f27b9323306 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -129,6 +129,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [BUG] Disable sort optimization for HALF_FLOAT ([#10999](https://github.com/opensearch-project/OpenSearch/pull/10999)) - Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) - Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087)) +- Use iterative approach to evaluate Regex.simpleMatch ([#11060](https://github.com/opensearch-project/OpenSearch/pull/11060)) ### Deprecated @@ -147,4 +148,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x \ No newline at end of file +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x diff --git 
a/server/src/main/java/org/opensearch/common/regex/Regex.java b/server/src/main/java/org/opensearch/common/regex/Regex.java index 396af77c8a751..323b460af62df 100644 --- a/server/src/main/java/org/opensearch/common/regex/Regex.java +++ b/server/src/main/java/org/opensearch/common/regex/Regex.java @@ -129,35 +129,35 @@ public static boolean simpleMatch(String pattern, String str, boolean caseInsens } private static boolean simpleMatchWithNormalizedStrings(String pattern, String str) { - final int firstIndex = pattern.indexOf('*'); - if (firstIndex == -1) { - return pattern.equals(str); - } - if (firstIndex == 0) { - if (pattern.length() == 1) { - return true; - } - final int nextIndex = pattern.indexOf('*', firstIndex + 1); - if (nextIndex == -1) { - // str.endsWith(pattern.substring(1)), but avoiding the construction of pattern.substring(1): - return str.regionMatches(str.length() - pattern.length() + 1, pattern, 1, pattern.length() - 1); - } else if (nextIndex == 1) { - // Double wildcard "**" - skipping the first "*" - return simpleMatchWithNormalizedStrings(pattern.substring(1), str); - } - final String part = pattern.substring(1, nextIndex); - int partIndex = str.indexOf(part); - while (partIndex != -1) { - if (simpleMatchWithNormalizedStrings(pattern.substring(nextIndex), str.substring(partIndex + part.length()))) { - return true; - } - partIndex = str.indexOf(part, partIndex + 1); + int sIdx = 0, pIdx = 0, match = 0, wildcardIdx = -1; + while (sIdx < str.length()) { + // both chars matching, incrementing both pointers + if (pIdx < pattern.length() && str.charAt(sIdx) == pattern.charAt(pIdx)) { + sIdx++; + pIdx++; + } else if (pIdx < pattern.length() && pattern.charAt(pIdx) == '*') { + // wildcard found, only incrementing pattern pointer + wildcardIdx = pIdx; + match = sIdx; + pIdx++; + } else if (wildcardIdx != -1) { + // last pattern pointer was a wildcard, incrementing string pointer + pIdx = wildcardIdx + 1; + match++; + sIdx = match; + } else { + // current pattern pointer is not a wildcard, last pattern pointer was also not a wildcard + // characters do not match + return false; } - return false; } - return str.regionMatches(0, pattern, 0, firstIndex) - && (firstIndex == pattern.length() - 1 // only wildcard in pattern is at the end, so no need to look at the rest of the string - || simpleMatchWithNormalizedStrings(pattern.substring(firstIndex), str.substring(firstIndex))); + + // check for remaining characters in pattern + while (pIdx < pattern.length() && pattern.charAt(pIdx) == '*') { + pIdx++; + } + + return pIdx == pattern.length(); } /** diff --git a/server/src/test/java/org/opensearch/common/regex/RegexTests.java b/server/src/test/java/org/opensearch/common/regex/RegexTests.java index b92fcdad56d74..21d3cb2df8f61 100644 --- a/server/src/test/java/org/opensearch/common/regex/RegexTests.java +++ b/server/src/test/java/org/opensearch/common/regex/RegexTests.java @@ -96,7 +96,22 @@ public void testDoubleWildcardMatch() { assertFalse(Regex.simpleMatch("fff**ddd", "fffabcdd")); assertTrue(Regex.simpleMatch("fff*******ddd", "fffabcddd")); assertTrue(Regex.simpleMatch("fff*******ddd", "FffAbcdDd", true)); + assertFalse(Regex.simpleMatch("fff*******ddd", "FffAbcdDd", false)); assertFalse(Regex.simpleMatch("fff******ddd", "fffabcdd")); + assertTrue(Regex.simpleMatch("abCDefGH******ddd", "abCDefGHddd", false)); + assertTrue(Regex.simpleMatch("******", "a")); + assertTrue(Regex.simpleMatch("***WILDcard***", "aaaaaaaaWILDcardZZZZZZ", false)); + 
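// The rewrite above swaps the recursive, substring-allocating matcher for a
// greedy two-pointer loop: on a mismatch it backtracks to the most recent '*'
// and lets that wildcard absorb one more input character. The same algorithm,
// restated as a self-contained sketch for experimentation (the class name and
// the sample patterns in main are hypothetical, not from the patch):
public final class WildcardMatchSketch {
    static boolean simpleMatch(String pattern, String str) {
        int sIdx = 0, pIdx = 0, match = 0, wildcardIdx = -1;
        while (sIdx < str.length()) {
            if (pIdx < pattern.length() && str.charAt(sIdx) == pattern.charAt(pIdx)) {
                sIdx++; // characters agree: advance both pointers
                pIdx++;
            } else if (pIdx < pattern.length() && pattern.charAt(pIdx) == '*') {
                wildcardIdx = pIdx; // remember the wildcard and what it has consumed so far
                match = sIdx;
                pIdx++;
            } else if (wildcardIdx != -1) {
                pIdx = wildcardIdx + 1; // mismatch after a wildcard: absorb one more character
                match++;
                sIdx = match;
            } else {
                return false; // mismatch with no wildcard to fall back on
            }
        }
        while (pIdx < pattern.length() && pattern.charAt(pIdx) == '*') {
            pIdx++; // only trailing wildcards may remain unconsumed
        }
        return pIdx == pattern.length();
    }

    public static void main(String[] args) {
        System.out.println(simpleMatch("fff*ddd", "fffabcddd"));     // true
        System.out.println(simpleMatch("fff******ddd", "fffabcdd")); // false
    }
}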
assertFalse(Regex.simpleMatch("***xxxxx123456789xxxxxx***", "xxxxxabcdxxxxx", false)); + assertFalse(Regex.simpleMatch("***xxxxxabcdxxxxx***", "xxxxxABCDxxxxx", false)); + assertTrue(Regex.simpleMatch("***xxxxxabcdxxxxx***", "xxxxxABCDxxxxx", true)); + assertTrue(Regex.simpleMatch("**stephenIsSuperCool**", "ItIsTrueThatStephenIsSuperCoolSoYouShouldLetThisIn", true)); + assertTrue( + Regex.simpleMatch( + "**w**X**y**Z**", + "abcdeFGHIJKLMNOPqrstuvwabcdeFGHIJKLMNOPqrstuvwXabcdeFGHIJKLMNOPqrstuvwXyabcdeFGHIJKLMNOPqrstuvwXyZ", + false + ) + ); } public void testSimpleMatch() { From 3eda422ae1979d3e1e7b158abab7d179f919f868 Mon Sep 17 00:00:00 2001 From: Ticheng Lin <51488860+ticheng-aws@users.noreply.github.com> Date: Thu, 9 Nov 2023 13:37:32 -0800 Subject: [PATCH 137/178] Fix slice collectors to leaves association with post filter (#11134) * Fix slice collectors to leaves association with profile enabled (#11134) Signed-off-by: Ticheng Lin * Update server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java Co-authored-by: Andriy Redko Signed-off-by: Ticheng Lin <51488860+ticheng-aws@users.noreply.github.com> Signed-off-by: Ticheng Lin --------- Signed-off-by: Ticheng Lin Signed-off-by: Ticheng Lin <51488860+ticheng-aws@users.noreply.github.com> Co-authored-by: Andriy Redko --- .../opensearch/common/lucene/search/FilteredCollector.java | 4 ++++ .../profile/query/ConcurrentQueryProfileBreakdown.java | 5 ++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java b/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java index 0d7a8866f7788..b5c0e84a10308 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java @@ -40,6 +40,7 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.opensearch.common.lucene.Lucene; +import org.opensearch.search.profile.query.ProfileWeight; import java.io.IOException; @@ -64,6 +65,9 @@ public Collector getCollector() { @Override public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + if (filter instanceof ProfileWeight) { + ((ProfileWeight) filter).associateCollectorToLeaves(context, collector); + } final ScorerSupplier filterScorerSupplier = filter.scorerSupplier(context); final LeafCollector in = collector.getLeafCollector(context); final Bits bits = Lucene.asSequentialAccessBits(context.reader().maxDoc(), filterScorerSupplier); diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java index 59ef01f9f947a..e1d41227a22f7 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java @@ -83,7 +83,10 @@ public Map toBreakdownMap() { // creates a new weight and breakdown map for each rewritten query. This new breakdown map captures the timing information for // the new rewritten query. The sliceCollectorsToLeaves is empty because this breakdown for rewritten query gets created later // in search leaf path which doesn't have collector. Also, this is not needed since this breakdown is per leaf and there is no - // concurrency involved. 
An empty sliceCollectorsToLeaves could also happen in the case of early termination. + // concurrency involved. + assert contexts.size() == 1 : "Unexpected size: " + + contexts.size() + + " of leaves breakdown in ConcurrentQueryProfileBreakdown of rewritten query for a leaf."; AbstractProfileBreakdown breakdown = contexts.values().iterator().next(); queryNodeTime = breakdown.toNodeTime() + createWeightTime; maxSliceNodeTime = 0L; From 0d54c1600b16081c15902a095111a9a530767099 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 9 Nov 2023 18:41:19 -0500 Subject: [PATCH 138/178] [BWC and API enforcement] Decorate the existing APIs with proper annotations (part 2) (#10950) Signed-off-by: Andriy Redko --- .../opensearch/common/CheckedBiConsumer.java | 5 ++++- .../opensearch/common/CheckedFunction.java | 3 +++ .../org/opensearch/common/TriFunction.java | 5 ++++- .../org/opensearch/common/collect/Tuple.java | 5 ++++- .../common/crypto/MasterKeyProvider.java | 5 +++++ .../common/io/InputStreamContainer.java | 5 ++++- .../opensearch/OpenSearchParseException.java | 4 +++- .../core/action/NotifyOnceListener.java | 5 ++++- .../org/opensearch/core/common/text/Text.java | 4 +++- .../common/transport/TransportAddress.java | 4 +++- .../tasks/resourcetracker/ResourceStats.java | 5 ++++- .../resourcetracker/ResourceStatsType.java | 5 ++++- .../resourcetracker/ResourceUsageInfo.java | 4 +++- .../resourcetracker/ResourceUsageMetric.java | 5 ++++- .../resourcetracker/TaskResourceStats.java | 4 +++- .../resourcetracker/TaskResourceUsage.java | 4 +++- .../resourcetracker/TaskThreadUsage.java | 4 +++- .../resourcetracker/ThreadResourceInfo.java | 5 ++++- .../core/xcontent/AbstractObjectParser.java | 4 +++- .../core/xcontent/ContextParser.java | 5 +++++ .../core/xcontent/DeprecationHandler.java | 5 +++++ .../opensearch/core/xcontent/ToXContent.java | 7 ++++-- .../opensearch/core/xcontent/XContent.java | 5 +++++ .../core/xcontent/XContentParser.java | 4 +++- .../common/xcontent/XContentType.java | 3 +++ .../OpenSearchGenerationException.java | 4 +++- .../ActionRequestValidationException.java | 2 ++ .../opensearch/action/DocWriteRequest.java | 7 +++++- .../opensearch/action/DocWriteResponse.java | 7 +++++- .../org/opensearch/action/IndicesRequest.java | 4 +++- .../ClusterAllocationExplainRequest.java | 4 +++- ...lusterAllocationExplainRequestBuilder.java | 4 +++- .../ClusterAllocationExplainResponse.java | 4 +++- .../admin/cluster/crypto/CryptoSettings.java | 4 +++- .../DeleteDecommissionStateRequest.java | 4 +++- ...DeleteDecommissionStateRequestBuilder.java | 4 +++- .../DeleteDecommissionStateResponse.java | 4 +++- .../get/GetDecommissionStateRequest.java | 4 +++- .../GetDecommissionStateRequestBuilder.java | 4 +++- .../get/GetDecommissionStateResponse.java | 4 +++- .../awareness/put/DecommissionRequest.java | 4 +++- .../put/DecommissionRequestBuilder.java | 4 +++- .../awareness/put/DecommissionResponse.java | 4 +++- .../cluster/health/ClusterHealthRequest.java | 7 ++++-- .../health/ClusterHealthRequestBuilder.java | 4 +++- .../cluster/health/ClusterHealthResponse.java | 4 +++- .../hotthreads/NodesHotThreadsRequest.java | 4 +++- .../NodesHotThreadsRequestBuilder.java | 4 +++- .../hotthreads/NodesHotThreadsResponse.java | 4 +++- .../cluster/node/info/NodesInfoRequest.java | 4 +++- .../node/info/NodesInfoRequestBuilder.java | 4 +++- .../cluster/node/info/NodesInfoResponse.java | 4 +++- ...desReloadSecureSettingsRequestBuilder.java | 4 +++- .../cluster/node/stats/NodesStatsRequest.java | 4 +++- 
.../node/stats/NodesStatsRequestBuilder.java | 4 +++- .../node/stats/NodesStatsResponse.java | 4 +++- .../node/tasks/cancel/CancelTasksRequest.java | 4 +++- .../cancel/CancelTasksRequestBuilder.java | 4 +++- .../tasks/cancel/CancelTasksResponse.java | 4 +++- .../node/tasks/get/GetTaskRequest.java | 4 +++- .../node/tasks/get/GetTaskRequestBuilder.java | 4 +++- .../node/tasks/get/GetTaskResponse.java | 4 +++- .../node/tasks/list/ListTasksRequest.java | 4 +++- .../tasks/list/ListTasksRequestBuilder.java | 4 +++- .../node/tasks/list/ListTasksResponse.java | 4 +++- .../cluster/node/tasks/list/TaskGroup.java | 7 ++++-- .../cluster/node/usage/NodesUsageRequest.java | 4 +++- .../node/usage/NodesUsageRequestBuilder.java | 4 +++- .../node/usage/NodesUsageResponse.java | 4 +++- .../restore/RestoreRemoteStoreRequest.java | 4 +++- .../restore/RestoreRemoteStoreResponse.java | 4 +++- .../remotestore/stats/RemoteStoreStats.java | 4 +++- .../stats/RemoteStoreStatsRequest.java | 4 +++- .../stats/RemoteStoreStatsRequestBuilder.java | 4 +++- .../stats/RemoteStoreStatsResponse.java | 4 +++- .../cleanup/CleanupRepositoryRequest.java | 4 +++- .../CleanupRepositoryRequestBuilder.java | 4 +++- .../cleanup/CleanupRepositoryResponse.java | 4 +++- .../delete/DeleteRepositoryRequest.java | 4 +++- .../DeleteRepositoryRequestBuilder.java | 4 +++- .../get/GetRepositoriesRequest.java | 4 +++- .../get/GetRepositoriesRequestBuilder.java | 4 +++- .../get/GetRepositoriesResponse.java | 4 +++- .../put/PutRepositoryRequest.java | 4 +++- .../put/PutRepositoryRequestBuilder.java | 4 +++- .../verify/VerifyRepositoryRequest.java | 4 +++- .../VerifyRepositoryRequestBuilder.java | 4 +++- .../verify/VerifyRepositoryResponse.java | 7 ++++-- .../reroute/ClusterRerouteRequest.java | 4 +++- .../reroute/ClusterRerouteRequestBuilder.java | 4 +++- .../reroute/ClusterRerouteResponse.java | 4 +++- .../ClusterUpdateSettingsRequest.java | 4 +++- .../ClusterUpdateSettingsRequestBuilder.java | 4 +++- .../ClusterUpdateSettingsResponse.java | 4 +++- .../shards/ClusterSearchShardsGroup.java | 4 +++- .../shards/ClusterSearchShardsRequest.java | 4 +++- .../ClusterSearchShardsRequestBuilder.java | 4 +++- .../shards/ClusterSearchShardsResponse.java | 4 +++- .../ClusterDeleteWeightedRoutingRequest.java | 4 +++- ...erDeleteWeightedRoutingRequestBuilder.java | 4 +++- .../ClusterDeleteWeightedRoutingResponse.java | 4 +++- .../get/ClusterGetWeightedRoutingRequest.java | 4 +++- ...usterGetWeightedRoutingRequestBuilder.java | 4 +++- .../ClusterGetWeightedRoutingResponse.java | 4 +++- .../put/ClusterPutWeightedRoutingRequest.java | 4 +++- ...usterPutWeightedRoutingRequestBuilder.java | 4 +++- .../ClusterPutWeightedRoutingResponse.java | 4 +++- .../snapshots/clone/CloneSnapshotRequest.java | 4 +++- .../clone/CloneSnapshotRequestBuilder.java | 4 +++- .../create/CreateSnapshotRequest.java | 4 +++- .../create/CreateSnapshotRequestBuilder.java | 4 +++- .../create/CreateSnapshotResponse.java | 4 +++- .../delete/DeleteSnapshotRequest.java | 4 +++- .../delete/DeleteSnapshotRequestBuilder.java | 4 +++- .../snapshots/get/GetSnapshotsRequest.java | 4 +++- .../get/GetSnapshotsRequestBuilder.java | 4 +++- .../snapshots/get/GetSnapshotsResponse.java | 4 +++- .../restore/RestoreSnapshotRequest.java | 7 +++++- .../RestoreSnapshotRequestBuilder.java | 4 +++- .../restore/RestoreSnapshotResponse.java | 4 +++- .../status/SnapshotIndexShardStage.java | 5 ++++- .../status/SnapshotIndexShardStatus.java | 4 +++- .../snapshots/status/SnapshotIndexStatus.java | 4 +++- 
.../snapshots/status/SnapshotShardsStats.java | 4 +++- .../snapshots/status/SnapshotStats.java | 4 +++- .../status/SnapshotsStatusRequest.java | 4 +++- .../status/SnapshotsStatusRequestBuilder.java | 4 +++- .../status/SnapshotsStatusResponse.java | 4 +++- .../cluster/state/ClusterStateRequest.java | 4 +++- .../state/ClusterStateRequestBuilder.java | 4 +++- .../cluster/state/ClusterStateResponse.java | 4 +++- .../admin/cluster/stats/AnalysisStats.java | 4 +++- .../cluster/stats/ClusterStatsIndices.java | 7 ++++-- .../cluster/stats/ClusterStatsNodes.java | 19 +++++++++++----- .../cluster/stats/ClusterStatsRequest.java | 4 +++- .../stats/ClusterStatsRequestBuilder.java | 4 +++- .../cluster/stats/ClusterStatsResponse.java | 4 +++- .../cluster/stats/IndexFeatureStats.java | 4 +++- .../admin/cluster/stats/MappingStats.java | 4 +++- .../DeleteStoredScriptRequest.java | 4 +++- .../DeleteStoredScriptRequestBuilder.java | 4 +++- .../storedscripts/GetStoredScriptRequest.java | 4 +++- .../GetStoredScriptRequestBuilder.java | 4 +++- .../GetStoredScriptResponse.java | 4 +++- .../storedscripts/PutStoredScriptRequest.java | 4 +++- .../PutStoredScriptRequestBuilder.java | 4 +++- .../tasks/PendingClusterTasksRequest.java | 4 +++- .../PendingClusterTasksRequestBuilder.java | 4 +++- .../tasks/PendingClusterTasksResponse.java | 4 +++- .../action/admin/indices/alias/Alias.java | 4 +++- .../indices/alias/IndicesAliasesRequest.java | 10 ++++++--- .../alias/IndicesAliasesRequestBuilder.java | 4 +++- .../indices/alias/get/GetAliasesRequest.java | 4 +++- .../alias/get/GetAliasesRequestBuilder.java | 4 +++- .../indices/alias/get/GetAliasesResponse.java | 4 +++- .../admin/indices/analyze/AnalyzeAction.java | 22 +++++++++++++------ .../analyze/AnalyzeRequestBuilder.java | 4 +++- .../cache/clear/ClearIndicesCacheRequest.java | 4 +++- .../ClearIndicesCacheRequestBuilder.java | 4 +++- .../clear/ClearIndicesCacheResponse.java | 4 +++- .../indices/close/CloseIndexRequest.java | 4 +++- .../close/CloseIndexRequestBuilder.java | 4 +++- .../indices/close/CloseIndexResponse.java | 13 +++++++---- .../indices/create/CreateIndexRequest.java | 4 +++- .../create/CreateIndexRequestBuilder.java | 4 +++- .../indices/create/CreateIndexResponse.java | 4 +++- .../delete/DeleteDanglingIndexRequest.java | 4 +++- .../ImportDanglingIndexRequest.java | 4 +++- .../list/ListDanglingIndicesRequest.java | 4 +++- .../list/ListDanglingIndicesResponse.java | 4 +++- .../datastream/CreateDataStreamAction.java | 7 ++++-- .../datastream/DeleteDataStreamAction.java | 7 ++++-- .../datastream/GetDataStreamAction.java | 13 +++++++---- .../indices/delete/DeleteIndexRequest.java | 4 +++- .../delete/DeleteIndexRequestBuilder.java | 4 +++- .../exists/indices/IndicesExistsRequest.java | 4 +++- .../indices/IndicesExistsRequestBuilder.java | 4 +++- .../exists/indices/IndicesExistsResponse.java | 4 +++- .../admin/indices/flush/FlushRequest.java | 4 +++- .../indices/flush/FlushRequestBuilder.java | 4 +++- .../admin/indices/flush/FlushResponse.java | 4 +++- .../indices/forcemerge/ForceMergeRequest.java | 4 +++- .../forcemerge/ForceMergeRequestBuilder.java | 4 +++- .../forcemerge/ForceMergeResponse.java | 4 +++- .../admin/indices/get/GetIndexRequest.java | 7 ++++-- .../indices/get/GetIndexRequestBuilder.java | 4 +++- .../admin/indices/get/GetIndexResponse.java | 4 +++- .../mapping/get/GetFieldMappingsRequest.java | 4 +++- .../get/GetFieldMappingsRequestBuilder.java | 6 +++-- .../mapping/get/GetFieldMappingsResponse.java | 7 ++++-- .../mapping/get/GetMappingsRequest.java 
| 4 +++- .../get/GetMappingsRequestBuilder.java | 4 +++- .../mapping/get/GetMappingsResponse.java | 4 +++- .../mapping/put/PutMappingRequest.java | 4 +++- .../mapping/put/PutMappingRequestBuilder.java | 4 +++- .../admin/indices/open/OpenIndexRequest.java | 4 +++- .../indices/open/OpenIndexRequestBuilder.java | 4 +++- .../admin/indices/open/OpenIndexResponse.java | 4 +++- .../readonly/AddIndexBlockRequest.java | 4 +++- .../readonly/AddIndexBlockRequestBuilder.java | 4 +++- .../readonly/AddIndexBlockResponse.java | 13 +++++++---- .../indices/recovery/RecoveryRequest.java | 4 +++- .../recovery/RecoveryRequestBuilder.java | 4 +++- .../indices/recovery/RecoveryResponse.java | 4 +++- .../admin/indices/refresh/RefreshRequest.java | 4 +++- .../refresh/RefreshRequestBuilder.java | 4 +++- .../indices/refresh/RefreshResponse.java | 4 +++- .../SegmentReplicationStatsRequest.java | 4 +++- ...SegmentReplicationStatsRequestBuilder.java | 4 +++- .../SegmentReplicationStatsResponse.java | 4 +++- .../indices/resolve/ResolveIndexAction.java | 19 +++++++++++----- .../admin/indices/rollover/Condition.java | 10 ++++++--- .../indices/rollover/RolloverRequest.java | 4 +++- .../rollover/RolloverRequestBuilder.java | 4 +++- .../indices/rollover/RolloverResponse.java | 4 +++- .../admin/indices/segments/IndexSegments.java | 5 ++++- .../indices/segments/IndexShardSegments.java | 4 +++- .../segments/IndicesSegmentResponse.java | 4 +++- .../segments/IndicesSegmentsRequest.java | 4 +++- .../IndicesSegmentsRequestBuilder.java | 4 +++- .../indices/segments/PitSegmentsRequest.java | 4 ++++ .../admin/indices/segments/ShardSegments.java | 4 +++- .../settings/get/GetSettingsRequest.java | 4 +++- .../get/GetSettingsRequestBuilder.java | 4 +++- .../settings/get/GetSettingsResponse.java | 4 +++- .../settings/put/UpdateSettingsRequest.java | 4 +++- .../put/UpdateSettingsRequestBuilder.java | 4 +++- .../IndicesShardStoreRequestBuilder.java | 4 +++- .../shards/IndicesShardStoresRequest.java | 4 +++- .../shards/IndicesShardStoresResponse.java | 13 ++++++++--- .../admin/indices/shrink/ResizeRequest.java | 4 +++- .../indices/shrink/ResizeRequestBuilder.java | 4 +++- .../admin/indices/shrink/ResizeResponse.java | 4 +++- .../admin/indices/shrink/ResizeType.java | 5 ++++- .../admin/indices/stats/CommonStats.java | 4 +++- .../admin/indices/stats/CommonStatsFlags.java | 7 ++++-- .../admin/indices/stats/IndexShardStats.java | 4 +++- .../admin/indices/stats/IndexStats.java | 5 ++++- .../indices/stats/IndicesStatsRequest.java | 4 +++- .../stats/IndicesStatsRequestBuilder.java | 4 +++- .../indices/stats/IndicesStatsResponse.java | 4 +++- .../admin/indices/stats/ShardStats.java | 4 +++- .../delete/DeleteIndexTemplateRequest.java | 4 +++- .../DeleteIndexTemplateRequestBuilder.java | 4 +++- .../get/GetIndexTemplatesRequest.java | 4 +++- .../get/GetIndexTemplatesRequestBuilder.java | 4 +++- .../get/GetIndexTemplatesResponse.java | 4 +++- .../post/SimulateIndexTemplateRequest.java | 4 +++- .../post/SimulateIndexTemplateResponse.java | 4 +++- .../put/PutComposableIndexTemplateAction.java | 7 ++++-- .../template/put/PutIndexTemplateRequest.java | 4 +++- .../put/PutIndexTemplateRequestBuilder.java | 4 +++- .../upgrade/get/IndexShardUpgradeStatus.java | 4 +++- .../upgrade/get/IndexUpgradeStatus.java | 5 ++++- .../upgrade/get/ShardUpgradeStatus.java | 4 +++- .../upgrade/get/UpgradeStatusRequest.java | 4 +++- .../get/UpgradeStatusRequestBuilder.java | 4 +++- .../upgrade/get/UpgradeStatusResponse.java | 4 +++- .../indices/upgrade/post/UpgradeRequest.java | 
4 +++- .../upgrade/post/UpgradeRequestBuilder.java | 4 +++- .../indices/upgrade/post/UpgradeResponse.java | 4 +++- .../validate/query/QueryExplanation.java | 4 +++- .../validate/query/ValidateQueryRequest.java | 4 +++- .../query/ValidateQueryRequestBuilder.java | 4 +++- .../validate/query/ValidateQueryResponse.java | 4 +++- .../action/bulk/BulkItemResponse.java | 7 ++++-- .../opensearch/action/bulk/BulkRequest.java | 4 +++- .../action/bulk/BulkRequestBuilder.java | 4 +++- .../opensearch/action/bulk/BulkResponse.java | 4 +++- .../action/delete/DeleteRequest.java | 4 +++- .../action/delete/DeleteRequestBuilder.java | 4 +++- .../action/delete/DeleteResponse.java | 7 ++++-- .../action/explain/ExplainRequest.java | 4 +++- .../action/explain/ExplainRequestBuilder.java | 4 +++- .../action/explain/ExplainResponse.java | 4 +++- .../action/fieldcaps/FieldCapabilities.java | 4 +++- .../fieldcaps/FieldCapabilitiesRequest.java | 4 ++++ .../FieldCapabilitiesRequestBuilder.java | 4 +++- .../fieldcaps/FieldCapabilitiesResponse.java | 5 ++++- .../org/opensearch/action/get/GetRequest.java | 4 +++- .../action/get/GetRequestBuilder.java | 4 +++- .../opensearch/action/get/GetResponse.java | 4 +++- .../action/get/MultiGetItemResponse.java | 4 +++- .../action/get/MultiGetRequest.java | 7 ++++-- .../action/get/MultiGetRequestBuilder.java | 4 +++- .../action/get/MultiGetResponse.java | 7 ++++-- .../opensearch/action/index/IndexRequest.java | 4 +++- .../action/index/IndexRequestBuilder.java | 4 +++- .../action/index/IndexResponse.java | 7 ++++-- .../action/ingest/DeletePipelineRequest.java | 4 +++- .../ingest/DeletePipelineRequestBuilder.java | 4 +++- .../action/ingest/GetPipelineRequest.java | 4 +++- .../ingest/GetPipelineRequestBuilder.java | 4 +++- .../action/ingest/GetPipelineResponse.java | 4 +++- .../action/ingest/PutPipelineRequest.java | 4 +++- .../ingest/PutPipelineRequestBuilder.java | 4 +++- .../action/ingest/SimulateDocumentResult.java | 4 +++- .../ingest/SimulatePipelineRequest.java | 4 +++- .../SimulatePipelineRequestBuilder.java | 4 +++- .../ingest/SimulatePipelineResponse.java | 4 +++- .../action/search/ClearScrollRequest.java | 4 +++- .../search/ClearScrollRequestBuilder.java | 4 +++- .../action/search/ClearScrollResponse.java | 4 +++- .../action/search/CreatePitRequest.java | 4 ++++ .../action/search/CreatePitResponse.java | 4 ++++ .../action/search/DeletePitInfo.java | 4 ++++ .../action/search/DeletePitRequest.java | 4 ++++ .../action/search/DeletePitResponse.java | 4 ++++ .../search/DeleteSearchPipelineRequest.java | 4 +++- .../action/search/GetAllPitNodeResponse.java | 4 ++++ .../action/search/GetAllPitNodesRequest.java | 4 ++++ .../action/search/GetAllPitNodesResponse.java | 4 ++++ .../search/GetSearchPipelineRequest.java | 4 +++- .../search/GetSearchPipelineResponse.java | 4 +++- .../opensearch/action/search/ListPitInfo.java | 4 ++++ .../action/search/MultiSearchRequest.java | 4 +++- .../search/MultiSearchRequestBuilder.java | 4 +++- .../action/search/MultiSearchResponse.java | 7 ++++-- .../search/PutSearchPipelineRequest.java | 4 +++- .../action/search/SearchPhaseName.java | 6 ++++- .../action/search/SearchProgressListener.java | 2 ++ .../action/search/SearchRequest.java | 4 +++- .../action/search/SearchRequestBuilder.java | 4 +++- .../action/search/SearchRequestStats.java | 4 +++- .../action/search/SearchResponse.java | 10 ++++++--- .../action/search/SearchResponseSections.java | 4 +++- .../action/search/SearchScrollRequest.java | 4 +++- .../search/SearchScrollRequestBuilder.java | 4 +++- 
.../opensearch/action/search/SearchTask.java | 4 +++- .../opensearch/action/search/SearchType.java | 5 ++++- .../action/support/IndicesOptions.java | 7 ++++-- .../action/support/WriteRequest.java | 4 +++- .../support/master/AcknowledgedResponse.java | 4 +++- .../MultiTermVectorsItemResponse.java | 4 +++- .../termvectors/MultiTermVectorsRequest.java | 4 +++- .../MultiTermVectorsRequestBuilder.java | 4 +++- .../termvectors/MultiTermVectorsResponse.java | 7 ++++-- .../action/termvectors/TermVectorsFilter.java | 7 ++++-- .../termvectors/TermVectorsRequest.java | 10 ++++++--- .../TermVectorsRequestBuilder.java | 4 +++- .../termvectors/TermVectorsResponse.java | 4 +++- .../action/update/UpdateRequest.java | 4 +++- .../action/update/UpdateRequestBuilder.java | 4 +++- .../action/update/UpdateResponse.java | 7 ++++-- .../cluster/ClusterChangedEvent.java | 4 +++- .../org/opensearch/cluster/ClusterInfo.java | 7 ++++-- .../org/opensearch/cluster/ClusterName.java | 4 +++- .../java/org/opensearch/cluster/Diff.java | 4 +++- .../org/opensearch/cluster/DiskUsage.java | 4 +++- .../ClusterAwarenessAttributeValueHealth.java | 4 ++++ .../ClusterAwarenessAttributesHealth.java | 3 +++ .../ClusterAwarenessHealth.java | 4 ++++ .../cluster/block/ClusterBlock.java | 4 +++- .../cluster/block/ClusterBlockLevel.java | 5 ++++- .../decommission/DecommissionAttribute.java | 4 +++- .../decommission/DecommissionStatus.java | 5 +++++ .../cluster/health/ClusterHealthStatus.java | 4 +++- .../cluster/health/ClusterIndexHealth.java | 4 +++- .../cluster/health/ClusterShardHealth.java | 4 +++- .../cluster/metadata/AliasMetadata.java | 7 ++++-- .../cluster/metadata/CryptoMetadata.java | 4 +++- .../cluster/metadata/DataStream.java | 7 ++++-- .../cluster/metadata/IndexMetadata.java | 4 +++- .../metadata/IndexTemplateMetadata.java | 3 ++- .../cluster/metadata/MappingMetadata.java | 4 +++- .../cluster/metadata/RepositoryMetadata.java | 4 +++- .../cluster/node/DiscoveryNodes.java | 10 ++++++--- .../cluster/routing/RecoverySource.java | 7 ++++-- .../cluster/routing/ShardRoutingState.java | 5 ++++- .../cluster/routing/UnassignedInfo.java | 10 ++++++--- .../cluster/routing/WeightedRouting.java | 4 +++- .../allocation/RerouteExplanation.java | 4 +++- .../allocation/RoutingExplanations.java | 4 +++- .../routing/allocation/decider/Decision.java | 7 ++++-- .../service/ClusterManagerTaskThrottler.java | 4 ++++ .../cluster/service/ClusterService.java | 4 +--- .../cluster/service/PendingClusterTask.java | 4 +++- .../opensearch/common/FieldMemoryStats.java | 4 +++- .../java/org/opensearch/common/Priority.java | 4 +++- .../blobstore/stream/read/ReadContext.java | 5 +++++ .../common/document/DocumentField.java | 4 +++- .../opensearch/common/geo/ShapeRelation.java | 4 +++- .../org/opensearch/common/inject/Module.java | 5 +---- .../common/io/stream/BytesStreamOutput.java | 4 +++- .../common/logging/DeprecationLogger.java | 7 ++++-- .../common/settings/IndexScopedSettings.java | 4 +++- .../org/opensearch/common/unit/Fuzziness.java | 4 +++- .../org/opensearch/common/unit/SizeValue.java | 4 +++- .../java/org/opensearch/http/HttpChannel.java | 4 +++- .../java/org/opensearch/http/HttpRequest.java | 7 ++++-- .../org/opensearch/http/HttpResponse.java | 5 ++++- .../org/opensearch/index/IndexModule.java | 3 ++- .../org/opensearch/index/IndexSortConfig.java | 7 ++++-- .../index/MergeSchedulerConfig.java | 4 +++- .../opensearch/index/ReplicationStats.java | 4 +++- .../SegmentReplicationPerGroupStats.java | 4 +++- .../index/SegmentReplicationShardStats.java | 4 
+++- .../org/opensearch/index/VersionType.java | 4 +++- .../index/analysis/AnalysisMode.java | 5 ++++- .../index/analysis/AnalyzerScope.java | 5 ++++- .../index/analysis/NameOrDefinition.java | 4 +++- .../index/analysis/NamedAnalyzer.java | 4 +++- .../index/cache/query/QueryCacheStats.java | 4 +++- .../cache/request/RequestCacheStats.java | 4 +++- .../opensearch/index/engine/CommitStats.java | 4 +++- .../org/opensearch/index/engine/Segment.java | 4 +++- .../index/engine/SegmentsStats.java | 4 +++- .../index/fielddata/FieldDataStats.java | 4 +++- .../index/fielddata/ScriptDocValues.java | 4 +++- .../fielddata/SortedBinaryDocValues.java | 4 +++- .../opensearch/index/flush/FlushStats.java | 4 +++- .../org/opensearch/index/get/GetResult.java | 2 ++ .../org/opensearch/index/get/GetStats.java | 2 ++ .../opensearch/index/merge/MergeStats.java | 4 +++- .../opensearch/index/query/IntervalMode.java | 4 +++- .../opensearch/index/query/QueryBuilder.java | 4 +++- .../index/query/QueryBuilderVisitor.java | 4 ++++ .../index/query/QueryRewriteContext.java | 4 +++- .../index/recovery/RecoveryStats.java | 4 +++- .../index/refresh/RefreshStats.java | 4 +++- .../index/remote/RemoteSegmentStats.java | 4 +++- .../remote/RemoteSegmentTransferTracker.java | 7 ++++-- .../remote/RemoteTranslogTransferTracker.java | 7 ++++-- .../index/search/stats/SearchStats.java | 14 +++++++----- .../index/seqno/RetentionLease.java | 4 +++- .../index/seqno/RetentionLeaseStats.java | 4 +++- .../index/seqno/RetentionLeases.java | 4 +++- .../opensearch/index/seqno/SeqNoStats.java | 4 +++- .../org/opensearch/index/shard/DocsStats.java | 4 +++- .../opensearch/index/shard/IndexingStats.java | 10 ++++++--- .../store/DirectoryFileTransferTracker.java | 7 ++++-- .../opensearch/index/store/StoreStats.java | 4 +++- .../remote/filecache/FileCacheStats.java | 4 +++- .../index/translog/RemoteTranslogStats.java | 4 +++- .../opensearch/index/translog/Translog.java | 4 +++- .../index/translog/TranslogStats.java | 4 +++- .../opensearch/index/warmer/WarmerStats.java | 4 +++- .../indices/recovery/RecoverySettings.java | 4 +++- .../indices/recovery/RecoveryState.java | 13 +++++++---- .../replication/SegmentReplicationState.java | 7 ++++-- .../common/ReplicationLuceneIndex.java | 7 ++++-- .../replication/common/ReplicationTimer.java | 4 +++- .../ingest/PipelineConfiguration.java | 4 +++- .../org/opensearch/monitor/fs/FsInfo.java | 13 +++++++---- .../org/opensearch/monitor/os/OsStats.java | 19 +++++++++++----- .../plugins/CryptoKeyProviderPlugin.java | 3 ++- .../org/opensearch/plugins/PluginInfo.java | 2 ++ .../repositories/RepositoriesStats.java | 6 ++++- .../repositories/RepositoryCleanupResult.java | 4 +++- .../java/org/opensearch/rest/RestRequest.java | 5 ++++- .../opensearch/script/ScriptCacheStats.java | 2 ++ .../opensearch/script/ScriptContextInfo.java | 7 ++++-- .../opensearch/script/ScriptContextStats.java | 4 +++- .../script/ScriptLanguagesInfo.java | 4 +++- .../org/opensearch/script/ScriptStats.java | 4 +++- .../opensearch/script/StoredScriptSource.java | 4 +++- .../java/org/opensearch/search/Scroll.java | 4 +++- .../opensearch/search/SearchExtBuilder.java | 4 +++- .../search/builder/PointInTimeBuilder.java | 4 +++- .../search/collapse/CollapseBuilder.java | 4 +++- .../search/collapse/CollapseContext.java | 4 +++- .../fetch/subphase/FetchSourceContext.java | 4 +++- .../subphase/highlight/HighlightBuilder.java | 13 +++++++---- .../highlight/SearchHighlightContext.java | 10 ++++++--- .../search/internal/AliasFilter.java | 4 +++- 
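Taken together, the hunks in this patch apply one mechanical pattern per type: import the marker annotation, tag the type with @PublicApi(since = ...) (or @ExperimentalApi for unstable surfaces such as MasterKeyProvider and InputStreamContainer), and flip the Javadoc tag from @opensearch.internal to @opensearch.api or @opensearch.experimental. A minimal sketch of the end state on a hypothetical type (the package, interface name, and since value below are illustrative, not taken from this patch):

    package org.opensearch.example;

    import org.opensearch.common.annotation.PublicApi;

    /**
     * A stable extension point that external consumers may rely on across releases.
     *
     * @opensearch.api
     */
    @PublicApi(since = "1.0.0") // release in which this surface became public API
    @FunctionalInterface
    public interface ExampleCheckedCallback<T, E extends Exception> {
        void accept(T value) throws E;
    }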
.../pipeline/PipelineConfiguration.java | 4 ++++ .../search/profile/NetworkTime.java | 4 +++- .../search/profile/ProfileResult.java | 4 +++- .../search/profile/ProfileShardResult.java | 4 +++- .../AggregationProfileShardResult.java | 4 +++- .../search/profile/query/CollectorResult.java | 4 +++- .../query/QueryProfileShardResult.java | 4 +++- .../opensearch/search/slice/SliceBuilder.java | 8 +++---- .../org/opensearch/search/sort/SortOrder.java | 4 +++- .../opensearch/search/suggest/Suggest.java | 12 +++++++++- .../search/suggest/SuggestBuilder.java | 4 +++- .../opensearch/search/suggest/Suggester.java | 4 +++- .../search/suggest/SuggestionBuilder.java | 4 +++- .../suggest/SuggestionSearchContext.java | 7 ++++-- .../suggest/completion/CompletionStats.java | 4 +++- .../org/opensearch/snapshots/RestoreInfo.java | 4 +++- .../org/opensearch/snapshots/Snapshot.java | 4 +++- .../org/opensearch/snapshots/SnapshotId.java | 4 +++- .../opensearch/snapshots/SnapshotInfo.java | 4 +++- .../snapshots/SnapshotShardFailure.java | 4 +++- .../opensearch/snapshots/SnapshotState.java | 5 ++++- .../main/java/org/opensearch/tasks/Task.java | 12 +++++----- .../java/org/opensearch/tasks/TaskInfo.java | 4 +++- .../java/org/opensearch/tasks/TaskResult.java | 4 +++- .../threadpool/ExecutorBuilder.java | 4 +++- .../org/opensearch/threadpool/Scheduler.java | 7 ++++++ .../org/opensearch/threadpool/ThreadPool.java | 6 +++-- .../opensearch/threadpool/ThreadPoolInfo.java | 4 +++- .../threadpool/ThreadPoolStats.java | 7 ++++-- .../opensearch/watcher/ResourceWatcher.java | 5 ++++- .../watcher/ResourceWatcherService.java | 3 ++- .../org/opensearch/watcher/WatcherHandle.java | 5 ++++- 497 files changed, 1757 insertions(+), 579 deletions(-) diff --git a/libs/common/src/main/java/org/opensearch/common/CheckedBiConsumer.java b/libs/common/src/main/java/org/opensearch/common/CheckedBiConsumer.java index 50c15bb7a95a8..c2ef08e288346 100644 --- a/libs/common/src/main/java/org/opensearch/common/CheckedBiConsumer.java +++ b/libs/common/src/main/java/org/opensearch/common/CheckedBiConsumer.java @@ -32,13 +32,16 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import java.util.function.BiConsumer; /** * A {@link BiConsumer}-like interface which allows throwing checked exceptions. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface CheckedBiConsumer { void accept(T t, U u) throws E; diff --git a/libs/common/src/main/java/org/opensearch/common/CheckedFunction.java b/libs/common/src/main/java/org/opensearch/common/CheckedFunction.java index 9c17ad4b4ee3f..927edd1b9905a 100644 --- a/libs/common/src/main/java/org/opensearch/common/CheckedFunction.java +++ b/libs/common/src/main/java/org/opensearch/common/CheckedFunction.java @@ -32,6 +32,8 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import java.util.function.Function; /** @@ -39,6 +41,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface CheckedFunction { R apply(T t) throws E; diff --git a/libs/common/src/main/java/org/opensearch/common/TriFunction.java b/libs/common/src/main/java/org/opensearch/common/TriFunction.java index 7b1bbece68680..8594e8e2cd0c9 100644 --- a/libs/common/src/main/java/org/opensearch/common/TriFunction.java +++ b/libs/common/src/main/java/org/opensearch/common/TriFunction.java @@ -32,6 +32,8 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + /** * Represents a function that accepts three arguments and produces a result. * @@ -40,8 +42,9 @@ * @param the type of the third argument * @param the return type * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface TriFunction { /** diff --git a/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java b/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java index 5c0e3f2de7708..a5d97dcd85ef7 100644 --- a/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java +++ b/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java @@ -32,12 +32,15 @@ package org.opensearch.common.collect; +import org.opensearch.common.annotation.PublicApi; + /** * Java 9 Tuple * todo: deprecate and remove w/ min jdk upgrade to 11? * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Tuple { public static Tuple tuple(V1 v1, V2 v2) { diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java b/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java index 8afa48eb92c0f..31d2dcd0dba3d 100644 --- a/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java +++ b/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java @@ -7,12 +7,17 @@ */ package org.opensearch.common.crypto; +import org.opensearch.common.annotation.ExperimentalApi; + import java.io.Closeable; import java.util.Map; /** * Master key provider responsible for management of master keys. 
+ * + * @opensearch.experimental */ +@ExperimentalApi public interface MasterKeyProvider extends Closeable { /** diff --git a/libs/common/src/main/java/org/opensearch/common/io/InputStreamContainer.java b/libs/common/src/main/java/org/opensearch/common/io/InputStreamContainer.java index eb8a4e1382497..3095336338f7f 100644 --- a/libs/common/src/main/java/org/opensearch/common/io/InputStreamContainer.java +++ b/libs/common/src/main/java/org/opensearch/common/io/InputStreamContainer.java @@ -8,13 +8,16 @@ package org.opensearch.common.io; +import org.opensearch.common.annotation.ExperimentalApi; + import java.io.InputStream; /** * Model composed of an input stream and the total content length of the stream * - * @opensearch.internal + * @opensearch.experimental */ +@ExperimentalApi public class InputStreamContainer { private final InputStream inputStream; diff --git a/libs/core/src/main/java/org/opensearch/OpenSearchParseException.java b/libs/core/src/main/java/org/opensearch/OpenSearchParseException.java index c2516402b0d30..26aff04b30a56 100644 --- a/libs/core/src/main/java/org/opensearch/OpenSearchParseException.java +++ b/libs/core/src/main/java/org/opensearch/OpenSearchParseException.java @@ -32,6 +32,7 @@ package org.opensearch; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.rest.RestStatus; @@ -40,8 +41,9 @@ /** * Unchecked exception that is translated into a {@code 400 BAD REQUEST} error when it bubbles out over HTTP. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OpenSearchParseException extends OpenSearchException { public OpenSearchParseException(String msg, Object... args) { diff --git a/libs/core/src/main/java/org/opensearch/core/action/NotifyOnceListener.java b/libs/core/src/main/java/org/opensearch/core/action/NotifyOnceListener.java index 6af9ca005d171..f087322e0024c 100644 --- a/libs/core/src/main/java/org/opensearch/core/action/NotifyOnceListener.java +++ b/libs/core/src/main/java/org/opensearch/core/action/NotifyOnceListener.java @@ -32,6 +32,8 @@ package org.opensearch.core.action; +import org.opensearch.common.annotation.PublicApi; + import java.util.concurrent.atomic.AtomicBoolean; /** @@ -39,8 +41,9 @@ * the is called is only called once. Subclasses should implement notification logic with * innerOnResponse and innerOnFailure. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class NotifyOnceListener implements ActionListener { private final AtomicBoolean hasBeenCalled = new AtomicBoolean(false); diff --git a/libs/core/src/main/java/org/opensearch/core/common/text/Text.java b/libs/core/src/main/java/org/opensearch/core/common/text/Text.java index ca5402edae59e..3a46bd4602297 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/text/Text.java +++ b/libs/core/src/main/java/org/opensearch/core/common/text/Text.java @@ -32,6 +32,7 @@ package org.opensearch.core.common.text; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContentFragment; @@ -44,8 +45,9 @@ * Both {@link String} and {@link BytesReference} representation of the text. Starts with one of those, and if * the other is requests, caches the other one in a local reference so no additional conversion will be needed. 
diff --git a/libs/core/src/main/java/org/opensearch/core/common/text/Text.java b/libs/core/src/main/java/org/opensearch/core/common/text/Text.java index ca5402edae59e..3a46bd4602297 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/text/Text.java +++ b/libs/core/src/main/java/org/opensearch/core/common/text/Text.java @@ -32,6 +32,7 @@ package org.opensearch.core.common.text; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContentFragment; @@ -44,8 +45,9 @@ * Both {@link String} and {@link BytesReference} representation of the text. Starts with one of those, and if * the other is requested, caches the other one in a local reference so no additional conversion will be needed. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Text implements Comparable<Text>, ToXContentFragment { public static final Text[] EMPTY_ARRAY = new Text[0];
diff --git a/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java b/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java index 551504ed6f719..3b5fbb7d76307 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java +++ b/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java @@ -32,6 +32,7 @@ package org.opensearch.core.common.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.network.NetworkAddress; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ /** * A transport address used for IP socket address (wraps {@link java.net.InetSocketAddress}). * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class TransportAddress implements Writeable, ToXContentFragment { /**
diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStats.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStats.java index d65f75581dd1b..e99afbb759031 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStats.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStats.java @@ -8,11 +8,14 @@ package org.opensearch.core.tasks.resourcetracker; +import org.opensearch.common.annotation.PublicApi; + /** * Different resource stats are defined. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public enum ResourceStats { CPU("cpu_time_in_nanos"), MEMORY("memory_in_bytes");
diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStatsType.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStatsType.java index fce8cc65e9bc5..2aedff2940d83 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStatsType.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStatsType.java @@ -8,11 +8,14 @@ package org.opensearch.core.tasks.resourcetracker; +import org.opensearch.common.annotation.PublicApi; + /** * Defines the different types of resource stats. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public enum ResourceStatsType { // resource stats of the worker thread reported directly from runnable. WORKER_STATS("worker_stats", false);
diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java index 2cbc3d4b2f5c3..a278b61894a65 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.PublicApi; import java.util.Collections; import java.util.EnumMap; @@ -22,8 +23,9 @@ * It captures the resource usage information like memory, CPU about a particular execution of thread * for a specific stats type. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class ResourceUsageInfo { private static final Logger logger = LogManager.getLogger(ResourceUsageInfo.class); private final EnumMap<ResourceStats, ResourceStatsInfo> statsInfo = new EnumMap<>(ResourceStats.class);
diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageMetric.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageMetric.java index 262dbe20dabda..f4cce2de820a0 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageMetric.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageMetric.java @@ -8,11 +8,14 @@ package org.opensearch.core.tasks.resourcetracker; +import org.opensearch.common.annotation.PublicApi; + /** * Information about resource usage * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class ResourceUsageMetric { private final ResourceStats stats; private final long value;
diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceStats.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceStats.java index d0d26550a4742..048c4a228fbd5 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceStats.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceStats.java @@ -9,6 +9,7 @@ package org.opensearch.core.tasks.resourcetracker; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -29,8 +30,9 @@ * Writeable TaskResourceStats objects are used to represent resource * snapshot information about currently running task. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class TaskResourceStats implements Writeable, ToXContentFragment { private final Map<String, TaskResourceUsage> resourceUsage; private final TaskThreadUsage threadUsage;
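The resourcetracker types above are Writeable and ToXContentFragment, so they surface in the task APIs as plain JSON fields keyed by the stat names defined in ResourceStats ("cpu_time_in_nanos", "memory_in_bytes"). A rough sketch of producing such a fragment by hand with the XContent utilities; the field values and the surrounding object layout are made up for illustration and are not code from this patch:

    import org.opensearch.common.xcontent.XContentFactory;
    import org.opensearch.core.common.bytes.BytesReference;
    import org.opensearch.core.xcontent.XContentBuilder;

    XContentBuilder builder = XContentFactory.jsonBuilder();  // throws IOException
    builder.startObject();
    builder.field("cpu_time_in_nanos", 1_000_000L);           // ResourceStats.CPU
    builder.field("memory_in_bytes", 4_096L);                 // ResourceStats.MEMORY
    builder.endObject();
    String json = BytesReference.bytes(builder).utf8ToString();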
diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceUsage.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceUsage.java index 7d6cadbef23d7..654f1c5695937 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceUsage.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceUsage.java @@ -8,6 +8,7 @@ package org.opensearch.core.tasks.resourcetracker; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -30,8 +31,9 @@ * Writeable TaskResourceUsage objects are used to represent resource usage * information of running tasks. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class TaskResourceUsage implements Writeable, ToXContentFragment { private static final ParseField CPU_TIME_IN_NANOS = new ParseField("cpu_time_in_nanos"); private static final ParseField MEMORY_IN_BYTES = new ParseField("memory_in_bytes");
diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskThreadUsage.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskThreadUsage.java index b593ec96e5996..abe03e3c520e0 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskThreadUsage.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskThreadUsage.java @@ -8,6 +8,7 @@ package org.opensearch.core.tasks.resourcetracker; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -29,8 +30,9 @@ * Writeable TaskThreadExecutions objects are used to represent thread related resource usage of running tasks. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class TaskThreadUsage implements Writeable, ToXContentFragment { private static final String THREAD_EXECUTIONS = "thread_executions";
diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ThreadResourceInfo.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ThreadResourceInfo.java index 4b341a94256c4..703fdfdf8a784 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ThreadResourceInfo.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ThreadResourceInfo.java @@ -8,14 +8,17 @@ package org.opensearch.core.tasks.resourcetracker; +import org.opensearch.common.annotation.PublicApi; + /** * Resource consumption information about a particular execution of thread. * <p>
* It captures the resource usage information about a particular execution of thread * for a specific stats type like worker_stats or response_stats etc., * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class ThreadResourceInfo { private final long threadId; private volatile boolean isActive = true;
diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java index 79e531a542026..32bbfc600f1f0 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java @@ -33,6 +33,7 @@ package org.opensearch.core.xcontent; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ObjectParser.NamedObjectParser; import org.opensearch.core.xcontent.ObjectParser.ValueType; @@ -47,8 +48,9 @@ /** * Superclass for {@link ObjectParser} and {@link ConstructingObjectParser}. Defines most of the "declare" methods so they can be shared. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class AbstractObjectParser<Value, Context> { /**
diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/ContextParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/ContextParser.java index d50dd2e68d890..f6e5647532bee 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/ContextParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/ContextParser.java @@ -32,11 +32,16 @@ package org.opensearch.core.xcontent; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; /** * Reads an object from a parser using some context. + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface ContextParser<Context, T> { T parse(XContentParser p, Context c) throws IOException; }
diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/DeprecationHandler.java b/libs/core/src/main/java/org/opensearch/core/xcontent/DeprecationHandler.java index 570a13ad8e093..a0e4027290742 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/DeprecationHandler.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/DeprecationHandler.java @@ -32,12 +32,17 @@ package org.opensearch.core.xcontent; +import org.opensearch.common.annotation.PublicApi; + import java.util.function.Supplier; /** * Callback for notifying the creator of the {@link XContentParser} that * parsing hit a deprecated field. + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface DeprecationHandler { /** * Throws an {@link UnsupportedOperationException} when parsing hits a
diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/ToXContent.java b/libs/core/src/main/java/org/opensearch/core/xcontent/ToXContent.java index 90dd0cbfb9a1a..ee8dad198df09 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/ToXContent.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/ToXContent.java @@ -33,6 +33,7 @@ package org.opensearch.core.xcontent; import org.opensearch.common.Booleans; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; import java.util.Map; @@ -42,15 +43,17 @@ * The output may or may not be a value object. Objects implementing {@link ToXContentObject} output a valid value * but those that don't may or may not require emitting a startObject and an endObject. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ToXContent { /** * Base parameters class * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface Params { String param(String key);
diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContent.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContent.java index dbc0041af42b5..1ebdd69d2b7a3 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContent.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContent.java @@ -32,6 +32,8 @@ package org.opensearch.core.xcontent; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -41,7 +43,10 @@ /** * A generic abstraction on top of handling content, inspired by JSON and pull parsing. + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface XContent { /** * The type this content handles and produces.
diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java index 4bfd47ccfdc94..85c3579b74cd5 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java @@ -33,6 +33,7 @@ package org.opensearch.core.xcontent; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.annotation.PublicApi; import java.io.Closeable; import java.io.IOException; @@ -53,8 +54,9 @@ * NamedXContentRegistry.EMPTY, ParserField."{\"key\" : \"value\"}"); * * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface XContentParser extends Closeable { /**
diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java index 9d1581a3a1517..453107fe4ff65 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java @@ -35,6 +35,7 @@ import com.fasterxml.jackson.dataformat.cbor.CBORConstants; import com.fasterxml.jackson.dataformat.smile.SmileConstants; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.cbor.CborXContent; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.common.xcontent.smile.SmileXContent; @@ -49,7 +50,9 @@ /** * The content type of {@link XContent}. + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum XContentType implements MediaType { /**
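Since ToXContent, XContent, and XContentParser are now declared public API, a typical consumer pattern is a small value class that serializes itself and is parsed back with a ConstructingObjectParser. A condensed sketch, assuming a hypothetical Point class that is not part of this patch:

    import org.opensearch.core.ParseField;
    import org.opensearch.core.xcontent.ConstructingObjectParser;
    import org.opensearch.core.xcontent.ToXContentObject;
    import org.opensearch.core.xcontent.XContentBuilder;

    import java.io.IOException;

    public final class Point implements ToXContentObject {
        private final int x;
        private final int y;

        public Point(int x, int y) { this.x = x; this.y = y; }

        // Parser mirrors the two fields written in toXContent below.
        public static final ConstructingObjectParser<Point, Void> PARSER =
            new ConstructingObjectParser<>("point", args -> new Point((int) args[0], (int) args[1]));
        static {
            PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField("x"));
            PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField("y"));
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            return builder.startObject().field("x", x).field("y", y).endObject();
        }
    }

Parsing is then PARSER.apply(parser, null), given an XContentParser positioned at the start of the object.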
diff --git a/server/src/main/java/org/opensearch/OpenSearchGenerationException.java b/server/src/main/java/org/opensearch/OpenSearchGenerationException.java index 50b021d41123f..51093390d57e0 100644 --- a/server/src/main/java/org/opensearch/OpenSearchGenerationException.java +++ b/server/src/main/java/org/opensearch/OpenSearchGenerationException.java @@ -32,6 +32,7 @@ package org.opensearch; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -39,8 +40,9 @@ /** * A generic exception indicating failure to generate. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OpenSearchGenerationException extends OpenSearchException { public OpenSearchGenerationException(String msg) {
diff --git a/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java b/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java index ffba4d2eb50c0..c30da41066dbd 100644 --- a/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java +++ b/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java @@ -33,10 +33,12 @@ package org.opensearch.action; import org.opensearch.common.ValidationException; +import org.opensearch.common.annotation.PublicApi; /** * Base exception for an action request validation extendable by plugins * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ActionRequestValidationException extends ValidationException {}
diff --git a/server/src/main/java/org/opensearch/action/DocWriteRequest.java b/server/src/main/java/org/opensearch/action/DocWriteRequest.java index 31f61e76c74ff..e09c76430defc 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/opensearch/action/DocWriteRequest.java @@ -38,6 +38,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.update.UpdateRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -55,8 +56,9 @@ * Generic interface to group ActionRequest, which perform writes to a single document * Action requests implementing this can be part of {@link org.opensearch.action.bulk.BulkRequest} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface DocWriteRequest<T> extends IndicesRequest, Accountable { // Flag set for disallowing index auto creation for an individual write request. @@ -169,7 +171,10 @@ public interface DocWriteRequest<T> extends IndicesRequest, Accountable { /** * Requested operation type to perform on the document + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") enum OpType { /** * Index the source. If there is an existing document with the id, it will
diff --git a/server/src/main/java/org/opensearch/action/DocWriteResponse.java b/server/src/main/java/org/opensearch/action/DocWriteResponse.java index e3052b3b80035..aada56ed93fd3 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/opensearch/action/DocWriteResponse.java @@ -38,6 +38,7 @@ import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -64,8 +65,9 @@ /** * A base class for the response of a write operation that involves a single doc * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContentObject { private static final String _SHARDS = "_shards"; @@ -80,7 +82,10 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr /** * An enum that represents the results of CRUD operations, primarily used to communicate the type of * operation that occurred. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Result implements Writeable { CREATED(0), UPDATED(1),
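DocWriteRequest and DocWriteResponse anchor the single-document write APIs, and OpType is how a request expresses create-vs-index semantics. A brief usage sketch against the client interface; the index name and source document are illustrative only:

    import org.opensearch.action.DocWriteRequest;
    import org.opensearch.action.bulk.BulkRequest;
    import org.opensearch.action.index.IndexRequest;
    import org.opensearch.common.xcontent.XContentType;

    BulkRequest bulk = new BulkRequest();
    bulk.add(
        new IndexRequest("logs").id("1")
            .opType(DocWriteRequest.OpType.CREATE) // fail if the id already exists
            .source("{\"message\":\"hello\"}", XContentType.JSON)
    );

On the response side, DocWriteResponse.Result (CREATED, UPDATED, and so on) reports which CRUD outcome actually happened.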
diff --git a/server/src/main/java/org/opensearch/action/IndicesRequest.java b/server/src/main/java/org/opensearch/action/IndicesRequest.java index 7e4c2f5076cda..f42818595d3cc 100644 --- a/server/src/main/java/org/opensearch/action/IndicesRequest.java +++ b/server/src/main/java/org/opensearch/action/IndicesRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.annotation.PublicApi; /** * Needs to be implemented by all {@link org.opensearch.action.ActionRequest} subclasses that relate to * @@ -40,8 +41,9 @@ * In case of internal requests originated during the distributed execution of an external request, * they will still return the indices that the original request related to. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndicesRequest { /**
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java index 3dec781f0acf4..4a5c3b076faba 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java @@ -35,6 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -48,8 +49,9 @@ /** * A request to explain the allocation of a shard in the cluster * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAllocationExplainRequest extends ClusterManagerNodeRequest<ClusterAllocationExplainRequest> { private static final ObjectParser<ClusterAllocationExplainRequest, Void> PARSER = new ObjectParser<>("cluster/allocation/explain");
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java index d85cb3929873d..31781dda04957 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for requests to explain the allocation of a shard in the cluster * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAllocationExplainRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< ClusterAllocationExplainRequest, ClusterAllocationExplainResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java index 0eeedb9af0ab7..17afdd862cf66 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.allocation; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -41,8 +42,9 @@ /** * Explanation response for a shard in the cluster * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAllocationExplainResponse extends ActionResponse { private ClusterAllocationExplanation cae;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java b/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java index bd783b349bed4..6e4d7b84f204e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.crypto; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.io.stream.StreamInput; @@ -28,8 +29,9 @@ /** * Crypto settings supplied during a put repository request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CryptoSettings implements Writeable, ToXContentObject { private String keyProviderName; private String keyProviderType;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java index 79b7381801da6..8243be21ba487 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java @@ -10,6 +10,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -18,8 +19,9 @@ /** * Request for deleting decommission request. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DeleteDecommissionStateRequest extends ClusterManagerNodeRequest<DeleteDecommissionStateRequest> { public DeleteDecommissionStateRequest() {}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java index 08f194c53f18e..94075d6ec860d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java @@ -10,12 +10,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for Delete decommission request. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DeleteDecommissionStateRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< DeleteDecommissionStateRequest, DeleteDecommissionStateResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java index 3d0404c25373b..13b056a019200 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.decommission.awareness.delete; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -17,8 +18,9 @@ /** * Response returned after deletion of decommission request. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DeleteDecommissionStateResponse extends AcknowledgedResponse { public DeleteDecommissionStateResponse(StreamInput in) throws IOException {
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java index 15c7e165fb62f..3ecbbfbbc7285 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java @@ -10,6 +10,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -21,8 +22,9 @@ /** * Get Decommissioned attribute request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class GetDecommissionStateRequest extends ClusterManagerNodeReadRequest<GetDecommissionStateRequest> { private String attributeName;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java index e766e9c674ff7..13eb375f0d00e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java @@ -10,12 +10,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Get decommission request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class GetDecommissionStateRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetDecommissionStateRequest, GetDecommissionStateResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java index bbcbd7013c299..9010c0e7d9388 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java @@ -10,6 +10,7 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -26,8 +27,9 @@ /** * Response for decommission status * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class GetDecommissionStateResponse extends ActionResponse implements ToXContentObject { private String attributeValue;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java index 18f14db3c2b8d..9070aa5a0dc55 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java @@ -11,6 +11,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -23,8 +24,9 @@ /** * Registers a decommission request with decommission attribute and timeout * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DecommissionRequest extends ClusterManagerNodeRequest<DecommissionRequest> { public static final TimeValue DEFAULT_NODE_DRAINING_TIMEOUT = TimeValue.timeValueSeconds(120);
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java index c3591fff54885..e965110cdb9df 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java @@ -12,13 +12,15 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Register decommission request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DecommissionRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< DecommissionRequest, DecommissionResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java index 13c1775b005b3..a2401cdf91b07 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.decommission.awareness.put; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; @@ -18,8 +19,9 @@ /** * Response for decommission request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DecommissionResponse extends AcknowledgedResponse implements ToXContentObject { public DecommissionResponse(StreamInput in) throws IOException {
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java index a9532077f9129..ec8b01d853da6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -40,6 +40,7 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -53,8 +54,9 @@ /** * Transport request for requesting cluster health * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterHealthRequest extends ClusterManagerNodeReadRequest<ClusterHealthRequest> implements IndicesRequest.Replaceable { private String[] indices; @@ -351,8 +353,9 @@ public ActionRequestValidationException validate() { /** * The level of the health request. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Level { CLUSTER, INDICES,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java index cca9d35d8aa6f..a9a3756755265 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java @@ -38,13 +38,15 @@ import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Builder for requesting cluster health * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterHealthRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< ClusterHealthRequest, ClusterHealthResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java index fb68012502116..1a27f161343e8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.health.ClusterIndexHealth; import org.opensearch.cluster.health.ClusterStateHealth; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.StatusToXContentObject; @@ -67,8 +68,9 @@ /** * Transport response for Cluster Health * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterHealthResponse extends ActionResponse implements StatusToXContentObject { private static final String CLUSTER_NAME = "cluster_name"; private static final String STATUS = "status";
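The health request/builder/response trio above is the programmatic face of the _cluster/health API. A small sketch of how a client typically drives it; the client handle and the timeout value are illustrative:

    import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
    import org.opensearch.client.Client;
    import org.opensearch.common.unit.TimeValue;

    ClusterHealthResponse health = client.admin().cluster()
        .prepareHealth()                           // ClusterHealthRequestBuilder
        .setWaitForGreenStatus()
        .setTimeout(TimeValue.timeValueSeconds(30))
        .get();
    boolean timedOut = health.isTimedOut();        // true if green was not reached in time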
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java index 39e912a995f35..9e52b90f7bd38 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.node.hotthreads; import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -43,8 +44,9 @@ /** * Transport request for OpenSearch Hot Threads * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesHotThreadsRequest extends BaseNodesRequest<NodesHotThreadsRequest> { int threads = 3;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java index 3639439dd3fb8..51b455b41115b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Builder class for requesting OpenSearch Hot Threads * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesHotThreadsRequestBuilder extends NodesOperationRequestBuilder< NodesHotThreadsRequest, NodesHotThreadsResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java index 5af9ce50a4bfe..eeddd2deb7ff8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java @@ -35,6 +35,7 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ /** * Transport response for OpenSearch Hot Threads * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesHotThreadsResponse extends BaseNodesResponse<NodeHotThreads> { public NodesHotThreadsResponse(StreamInput in) throws IOException {
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java index 31cacda7c3487..e694a5e102e02 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.node.info; import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ /** * A request to get node (cluster) level information. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> { private Set<String> requestedMetrics = Metric.allMetrics();
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java index 76ef75b77a1cf..4c3191b017948 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport action for OpenSearch Node Information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesInfoRequest, NodesInfoResponse, NodesInfoRequestBuilder> { public NodesInfoRequestBuilder(OpenSearchClient client, NodesInfoAction action) {
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java index 5b4444053a8b7..7ddd70185e8ad 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.io.stream.StreamInput; @@ -59,8 +60,9 @@ /** * Transport response for OpenSearch Node Information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements ToXContentFragment { public NodesInfoResponse(StreamInput in) throws IOException {
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java index 2f2162947aeea..d970e4c9e5468 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.settings.SecureString; /** * Builder for the reload secure settings nodes request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesReloadSecureSettingsRequestBuilder extends NodesOperationRequestBuilder< NodesReloadSecureSettingsRequest, NodesReloadSecureSettingsResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index fc72668d36413..22a667e7e8f6f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -48,8 +49,9 @@ /** * A request to get node (cluster) level stats. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> { private CommonStatsFlags indices = new CommonStatsFlags();
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java index e382278f5ddb8..58149e9a34a34 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport builder for obtaining OpenSearch Node Stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder< NodesStatsRequest, NodesStatsResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java index 8fbc52ad8a3f8..73a938568acc3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java @@ -35,6 +35,7 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ /** * Transport response for obtaining OpenSearch Node Stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesStatsResponse extends BaseNodesResponse<NodeStats> implements ToXContentFragment { public NodesStatsResponse(StreamInput in) throws IOException {
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java index 360765e8f4803..183fb2a236148 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.node.tasks.cancel; import org.opensearch.action.support.tasks.BaseTasksRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.tasks.CancellableTask; @@ -44,8 +45,9 @@ /** * A request to cancel tasks * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> { public static final String DEFAULT_REASON = "by user request";
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java index ee19e8b104603..ac02dfdf7381f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.tasks.TasksRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for the request to cancel tasks running on the specified nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CancelTasksRequestBuilder extends TasksRequestBuilder<CancelTasksRequest, CancelTasksResponse, CancelTasksRequestBuilder> { public CancelTasksRequestBuilder(OpenSearchClient client, CancelTasksAction action) {
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java index 979489999cc6e..1f86c7c22c2eb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java @@ -35,6 +35,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ConstructingObjectParser; @@ -49,8 +50,9 @@ /** * Returns the list of tasks that were cancelled * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CancelTasksResponse extends ListTasksResponse { private static final ConstructingObjectParser<CancelTasksResponse, Void> PARSER = setupParser(
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java index 82902c55b1b0a..13c6d645d4c3a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ /** * A request to get node tasks * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetTaskRequest extends ActionRequest { private TaskId taskId = TaskId.EMPTY_TASK_ID; private boolean waitForCompletion = false;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java index dd429da82ebf8..ea42e1770e7f1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java @@ -34,14 +34,16 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.tasks.TaskId; /** * Builder for the request to retrieve the list of tasks running on the specified nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetTaskRequestBuilder extends ActionRequestBuilder<GetTaskRequest, GetTaskResponse> { public GetTaskRequestBuilder(OpenSearchClient client, GetTaskAction action) { super(client, action, new GetTaskRequest());
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java index 9d4e6da8c7e62..80901373e14d5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.node.tasks.get; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -48,8 +49,9 @@ /** * Returns the list of tasks currently running on the nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetTaskResponse extends ActionResponse implements ToXContentObject { private final TaskResult task;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java index 070b93c788ef0..6ee56b0da7884 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.node.tasks.list; import org.opensearch.action.support.tasks.BaseTasksRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -41,8 +42,9 @@ /** * A request to get node tasks * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> { private boolean detailed = false;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java index 45beb0dd899b5..a195b98d06e76 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.tasks.TasksRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for the request to retrieve the list of tasks running on the specified nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ListTasksRequestBuilder extends TasksRequestBuilder<ListTasksRequest, ListTasksResponse, ListTasksRequestBuilder> { public ListTasksRequestBuilder(OpenSearchClient client, ListTasksAction action) {
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index 693566391fb25..337151a2a9268 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.TriFunction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -64,8 +65,9 @@ /** * Returns the list of tasks currently running on the nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ListTasksResponse extends BaseTasksResponse implements ToXContentObject { private static final String TASKS = "tasks";
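The list/cancel task types annotated above are reachable through the cluster admin client. A compact sketch; the task selection criteria here are illustrative only:

    import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
    import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
    import org.opensearch.client.Client;

    ListTasksResponse tasks = client.admin().cluster()
        .prepareListTasks()       // ListTasksRequestBuilder
        .setDetailed(true)        // include per-task descriptions
        .get();

    CancelTasksResponse cancelled = client.admin().cluster()
        .prepareCancelTasks()     // CancelTasksRequestBuilder
        .setActions("*search*")   // cancel tasks whose action matches
        .get();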
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java index 0ca114ae0ed5c..fb23a41b8dc19 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.node.tasks.list; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.tasks.TaskInfo; @@ -45,8 +46,9 @@ /** * Information about a currently running task and all its subtasks. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TaskGroup implements ToXContentObject { private final TaskInfo task; @@ -65,8 +67,9 @@ public static Builder builder(TaskInfo taskInfo) { /** * Builder for the Task Group * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private TaskInfo taskInfo; private List<Builder> childTasks;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java index e31b88ace953f..385f48d5690c1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.node.usage; import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -41,8 +42,9 @@ /** * Transport request for collecting OpenSearch telemetry * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesUsageRequest extends BaseNodesRequest<NodesUsageRequest> { private boolean restActions;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java index 7d1823b59dc04..ec1176ac634fa 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport builder for collecting OpenSearch telemetry * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesUsageRequestBuilder extends NodesOperationRequestBuilder< NodesUsageRequest, NodesUsageResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java index 575d88c10317c..7dc8a318b2cf4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java @@ -35,6 +35,7 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -48,8 +49,9 @@ * The response for the nodes usage api which contains the individual usage * statistics for all nodes queried. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesUsageResponse extends BaseNodesResponse<NodeUsage> implements ToXContentFragment { public NodesUsageResponse(StreamInput in) throws IOException {
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java index 3b090415c175b..afa2058d1deba 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java @@ -11,6 +11,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -30,8 +31,9 @@ /** * Restore remote store request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.2.0") public class RestoreRemoteStoreRequest extends ClusterManagerNodeRequest<RestoreRemoteStoreRequest> implements ToXContentObject { private String[] indices = Strings.EMPTY_ARRAY;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java index 66908889b0641..3de926c256f46 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.remotestore.restore; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -28,8 +29,9 @@ /** * Contains information about remote store restores * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.2.0") public final class RestoreRemoteStoreResponse extends ActionResponse implements ToXContentObject { @Nullable
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java index f292fcec7ccac..7bddd0deb373b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.remotestore.stats; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -22,8 +23,9 @@ /** * Encapsulates all remote store stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public class RemoteStoreStats implements Writeable, ToXContentFragment { /** * Stats related to Remote Segment Store operations
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequest.java index f09cf79c5154c..12c316adc75cc 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequest.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.remotestore.stats; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -17,8 +18,9 @@ /** * Encapsulates all remote store stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public class RemoteStoreStatsRequest extends BroadcastRequest<RemoteStoreStatsRequest> { private String[] shards;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequestBuilder.java index c31e4a1fd6178..4da700d3dc51b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequestBuilder.java @@ -10,13 +10,15 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Builder for RemoteStoreStatsRequest * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public class RemoteStoreStatsRequestBuilder extends BroadcastOperationRequestBuilder< RemoteStoreStatsRequest, RemoteStoreStatsResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java index 63a4c97d695b7..cad57d148770b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.remotestore.stats; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -25,8 +26,9 @@ /** * Remote Store stats response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public class RemoteStoreStatsResponse extends BroadcastResponse { private final RemoteStoreStats[] remoteStoreStats;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java index 7e3f4cd95fc72..3e408c6114690 100644 ---
a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java @@ -33,6 +33,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -43,8 +44,9 @@ /** * Transport request for cleaning up snapshot repositories * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CleanupRepositoryRequest extends AcknowledgedRequest { private String repository; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java index 95c4fb372572f..34e42b157e627 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport builder for cleaning up snapshot repositories * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CleanupRepositoryRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< CleanupRepositoryRequest, CleanupRepositoryResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java index 99af862c769e8..e6790e8cbe708 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java @@ -31,6 +31,7 @@ package org.opensearch.action.admin.cluster.repositories.cleanup; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -46,8 +47,9 @@ /** * Transport response for cleaning up snapshot repositories * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class CleanupRepositoryResponse extends ActionResponse implements ToXContentObject { private static final ObjectParser PARSER = new ObjectParser<>( diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java index 352a3772e039b..04fdf22bee593 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import 
org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ *
<p>
* The unregister repository command just unregisters the repository. No data is getting deleted from the repository. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteRepositoryRequest extends AcknowledgedRequest { private String name; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java index ffef8d5b41979..6f5d0495e1c9f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for unregister repository request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder< DeleteRepositoryRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java index 6fb086f65497e..80a86f1b79209 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ /** * Get repository request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetRepositoriesRequest extends ClusterManagerNodeReadRequest { private String[] repositories = Strings.EMPTY_ARRAY; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java index 4b93aff4c25bc..b0c18f952b3df 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.ArrayUtils; /** * Get repository request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetRepositoriesRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetRepositoriesRequest, GetRepositoriesResponse, diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java index f8c8df25be532..f467b240aac31 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,8 +51,9 @@ /** * Get repositories response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetRepositoriesResponse extends ActionResponse implements ToXContentObject { private RepositoriesMetadata repositories; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index 582f73f335b49..3fbd5743b5c8c 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -36,6 +36,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.admin.cluster.crypto.CryptoSettings; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -57,8 +58,9 @@ * Registers a repository with given name, type and settings. If the repository with the same name already * exists in the cluster, the new repository will replace the existing repository. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutRepositoryRequest extends AcknowledgedRequest implements ToXContentObject { private String name; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java index cf649ee6b4cbf..22aa6d7dc7c00 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -44,8 +45,9 @@ /** * Register repository request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder< PutRepositoryRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java index b84161e716f5d..ae6c92d8625ca 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ /** * Verify repository request. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class VerifyRepositoryRequest extends AcknowledgedRequest { private String name; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java index c405fb9bc12cd..023f223700775 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for verify repository request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class VerifyRepositoryRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< VerifyRepositoryRequest, VerifyRepositoryResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java index d7af3478bdac3..12d6a4cca5683 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.verify; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; @@ -54,8 +55,9 @@ /** * Verify repository response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class VerifyRepositoryResponse extends ActionResponse implements ToXContentObject { static final String NODES = "nodes"; @@ -64,8 +66,9 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte /** * Inner Node View * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class NodeView implements Writeable, ToXContentObject { private static final ObjectParser.NamedObjectParser PARSER; static { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java index a6addce14787d..1cefaa4866110 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.cluster.routing.allocation.command.AllocationCommand; import org.opensearch.cluster.routing.allocation.command.AllocationCommands; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ /** * Request to submit cluster reroute allocation commands * - * @opensearch.internal + * @opensearch.api */ 
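Every file touched by this patch receives the same two-part change: the Javadoc tag flips from @opensearch.internal to @opensearch.api, and the type gains a @PublicApi annotation whose since value records the first release that exposed it ("1.0.0" for the long-standing task, repository, snapshot, and reroute types; "2.2.0", "2.4.0", and "2.8.0" for the newer remote-store and weighted-routing types). A minimal sketch of the resulting pattern, on a hypothetical ExampleRequest class (the class name and body are illustrative only; the import path and annotation usage are taken from the added lines above):

import org.opensearch.common.annotation.PublicApi;

/**
 * Transport request for an example client-facing action
 *
 * @opensearch.api
 */
@PublicApi(since = "1.0.0")
public class ExampleRequest {
    // Fields, validation, and stream serialization are untouched by this
    // patch; only the annotation and the Javadoc tag mark the type public.
}

The added line that follows applies exactly this pattern to ClusterRerouteRequest.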
+@PublicApi(since = "1.0.0") public class ClusterRerouteRequest extends AcknowledgedRequest { private AllocationCommands commands = new AllocationCommands(); private boolean dryRun; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java index 01d52cb43320d..fc8310bdf7852 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.routing.allocation.command.AllocationCommand; +import org.opensearch.common.annotation.PublicApi; /** * Builder for a cluster reroute request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder< ClusterRerouteRequest, ClusterRerouteResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index a62029218ca25..ff01888040e82 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.allocation.RoutingExplanations; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContent; @@ -46,8 +47,9 @@ /** * Response returned after a cluster reroute request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterRerouteResponse extends AcknowledgedResponse implements ToXContentObject { private final ClusterState state; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index 6b62a5edd8c28..77aee99c2e902 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; @@ -55,8 +56,9 @@ /** * Request for an update cluster settings action * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterUpdateSettingsRequest extends AcknowledgedRequest implements ToXContentObject { private static final ParseField PERSISTENT = new ParseField("persistent"); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java index 4d08c94f78b6a..53f1f17bbeb50 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java @@ -34,6 +34,7 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -42,8 +43,9 @@ /** * Builder for a cluster update settings request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder< ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java index 1c543260f7aee..2dfdb49736773 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.settings; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; @@ -49,8 +50,9 @@ /** * A response for a cluster update settings action. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterUpdateSettingsResponse extends AcknowledgedResponse { private static final ParseField PERSISTENT = new ParseField("persistent"); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java index 84e9554932864..eaafca21e1894 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.shards; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -45,8 +46,9 @@ /** * Transport action for searching shard groups * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterSearchShardsGroup implements Writeable, ToXContentObject { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index 7ddb945ad911e..62e05ebb37e28 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ /** * Transport request for searching shards * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterSearchShardsRequest extends ClusterManagerNodeReadRequest implements IndicesRequest.Replaceable { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java index 674a2c2c36221..c4f8a29bbcf3d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for searching shards * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterSearchShardsRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< ClusterSearchShardsRequest, ClusterSearchShardsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java index 2b24d870219bb..8ab6455bca4a7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.shards; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ /** * Transport response for searching shards * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterSearchShardsResponse extends ActionResponse implements ToXContentObject { private final ClusterSearchShardsGroup[] groups; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java index 7f69fe9fe3c72..72de28ca7e699 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java @@ -15,6 +15,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; @@ -32,8 +33,9 @@ /** * Request to delete weights for weighted round-robin shard routing policy. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterDeleteWeightedRoutingRequest extends ClusterManagerNodeRequest { private static final Logger logger = LogManager.getLogger(ClusterDeleteWeightedRoutingRequest.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java index bb34fea589534..e0d4e0ad2abed 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java @@ -10,12 +10,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Request builder to delete weights for weighted round-robin shard routing policy. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterDeleteWeightedRoutingRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< ClusterDeleteWeightedRoutingRequest, ClusterDeleteWeightedRoutingResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java index 2a417e9f4287f..f6a18ae5055ae 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.shards.routing.weighted.delete; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -17,8 +18,9 @@ /** * Response from deleting weights for weighted round-robin search routing policy. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterDeleteWeightedRoutingResponse extends AcknowledgedResponse { ClusterDeleteWeightedRoutingResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java index 7dcec15c750fc..937829de1db00 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java @@ -10,6 +10,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -20,8 +21,9 @@ /** * Request to get weights for weighted round-robin search routing policy. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterGetWeightedRoutingRequest extends ClusterManagerNodeReadRequest { String awarenessAttribute; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java index 82f4c1106461d..3cb5e7d3d07b9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java @@ -10,12 +10,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Request builder to get weights for weighted round-robin search routing policy. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterGetWeightedRoutingRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< ClusterGetWeightedRoutingRequest, ClusterGetWeightedRoutingResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java index 5ef14d0e00c49..b109ecb7de5d9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java @@ -11,6 +11,7 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -28,8 +29,9 @@ /** * Response from fetching weights for weighted round-robin search routing policy. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterGetWeightedRoutingResponse extends ActionResponse implements ToXContentObject { private static final String WEIGHTS = "weights"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java index 8e8432a384aa5..c310e28610184 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java @@ -16,6 +16,7 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; @@ -37,8 +38,9 @@ /** * Request to update weights for weighted round-robin shard routing policy. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterPutWeightedRoutingRequest extends ClusterManagerNodeRequest { private static final Logger logger = LogManager.getLogger(ClusterPutWeightedRoutingRequest.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestBuilder.java index adfb2cf02f6d9..c520b0efd9838 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestBuilder.java @@ -11,12 +11,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.annotation.PublicApi; /** * Request builder to update weights for weighted round-robin shard routing policy. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterPutWeightedRoutingRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< ClusterPutWeightedRoutingRequest, ClusterPutWeightedRoutingResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java index cbf10aa74f8a2..4fee2f05a8715 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.shards.routing.weighted.put; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -16,8 +17,9 @@ /** * Response from updating weights for weighted round-robin search routing policy. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterPutWeightedRoutingResponse extends AcknowledgedResponse { public ClusterPutWeightedRoutingResponse(boolean acknowledged) { super(acknowledged); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java index 694b13e37bb03..4a05911610137 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java @@ -36,6 +36,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,8 +51,9 @@ /** * Transport request for cloning a snapshot * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CloneSnapshotRequest extends ClusterManagerNodeRequest implements IndicesRequest.Replaceable, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java index 0f47d77d6a9d3..839a1b935ad1f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java @@ -37,13 +37,15 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; /** * Transport request builder for cloning a snapshot * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class 
CloneSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< CloneSnapshotRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 0cfc24994768a..0ad3071a99045 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -38,6 +38,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.Strings; @@ -76,8 +77,9 @@ *

 * <li>must not contain invalid file name characters {@link Strings#INVALID_FILENAME_CHARS}</li>
 * </ul> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateSnapshotRequest extends ClusterManagerNodeRequest implements IndicesRequest.Replaceable, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java index 40d440419819c..c378c416cc973 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -43,8 +44,9 @@ /** * Create snapshot request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< CreateSnapshotRequest, CreateSnapshotResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java index 1517112b9b3e1..5d25cf6cacfab 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.create; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -51,8 +52,9 @@ /** * Create snapshot response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateSnapshotResponse extends ActionResponse implements ToXContentObject { private static final ObjectParser PARSER = new ObjectParser<>( diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java index d08c3033e7e10..21280381610f7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ * Delete snapshot request removes snapshots from the repository and cleans up all files that are associated with the snapshots. * All files that are shared with at least one other existing snapshot are left intact.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteSnapshotRequest extends ClusterManagerNodeRequest { private String repository; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java index 1fa855265ffd4..f4da1ec0f7785 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Delete snapshot request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< DeleteSnapshotRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 8e2e2d3390058..7e95885c60f93 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ /** * Get snapshot request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetSnapshotsRequest extends ClusterManagerNodeRequest { public static final String ALL_SNAPSHOTS = "_all"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index 3434f1cb47a99..983325aa575d7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.ArrayUtils; /** * Get snapshots request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetSnapshotsRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< GetSnapshotsRequest, GetSnapshotsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java index 5d5de4c7fa85e..6b0e8ba8a372f 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.snapshots.get; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; @@ -53,8 +54,9 @@ /** * Get snapshots response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetSnapshotsResponse extends ActionResponse implements ToXContentObject { @SuppressWarnings("unchecked") diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index c688b13ed9fdd..492ef86bb7843 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -63,15 +64,19 @@ /** * Restore snapshot request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestoreSnapshotRequest extends ClusterManagerNodeRequest implements ToXContentObject { private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestoreSnapshotRequest.class); /** * Enumeration of possible storage types + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum StorageType { LOCAL("local"), REMOTE_SNAPSHOT("remote_snapshot"); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index d9cca536d1c41..39eaadf3c8de6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -44,8 +45,9 @@ /** * Restore snapshot request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestoreSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< RestoreSnapshotRequest, RestoreSnapshotResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java index f6e50c69da3dc..c94645a6deb8f 100644 --- 
--- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.cluster.snapshots.restore;
 
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -53,8 +54,9 @@
 /**
  * Contains information about restores snapshot
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class RestoreSnapshotResponse extends ActionResponse implements ToXContentObject {
 
     @Nullable
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java
index 6e250962d1210..cd7f4b392c61d 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java
@@ -32,11 +32,14 @@
 
 package org.opensearch.action.admin.cluster.snapshots.status;
 
+import org.opensearch.common.annotation.PublicApi;
+
 /**
  * Stage for snapshotting an Index Shard
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public enum SnapshotIndexShardStage {
 
     /**
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java
index fc3ffd4977da5..f991e90cb0728 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java
@@ -35,6 +35,7 @@
 import org.opensearch.OpenSearchParseException;
 import org.opensearch.action.support.broadcast.BroadcastShardResponse;
 import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -56,8 +57,9 @@
 /**
  * Status for snapshotting an Index Shard
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class SnapshotIndexShardStatus extends BroadcastShardResponse implements ToXContentFragment {
 
     private SnapshotIndexShardStage stage = SnapshotIndexShardStage.INIT;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java
index 9c2db62c33bd0..402fee76bc663 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.admin.cluster.snapshots.status;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.xcontent.ConstructingObjectParser;
 import org.opensearch.core.xcontent.ObjectParser;
@@ -54,8 +55,9 @@
 /**
  * Represents snapshot status of all shards in the index
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class SnapshotIndexStatus implements Iterable<SnapshotIndexShardStatus>, ToXContentFragment {
 
     private final String index;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java
index ad514a13312ba..bfd0c23c579bc 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.admin.cluster.snapshots.status;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.xcontent.ConstructingObjectParser;
 import org.opensearch.core.xcontent.ToXContent;
@@ -47,8 +48,9 @@
 /**
  * Status of a snapshot shards
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class SnapshotShardsStats implements ToXContentObject {
 
     private int initializingShards;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java
index babdb7540a314..f287e94edd0dc 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.admin.cluster.snapshots.status;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -50,8 +51,9 @@
 /**
  * Stats for snapshots
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class SnapshotStats implements Writeable, ToXContentObject {
 
     private long startTime;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java
index 22fca3c54c604..061e73f1094b5 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -45,8 +46,9 @@
 /**
  * Get snapshot status request
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class SnapshotsStatusRequest extends ClusterManagerNodeRequest<SnapshotsStatusRequest> {
 
     private String repository = "_all";
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java
index 55f156d4a470e..9377eca60e353 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java
@@ -34,13 +34,15 @@
 
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.util.ArrayUtils;
 
 /**
  * Snapshots status request builder
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class SnapshotsStatusRequestBuilder extends ClusterManagerNodeOperationRequestBuilder<
     SnapshotsStatusRequest,
     SnapshotsStatusResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java
index 2f0e5e4b4686f..df436a587c2ad 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.admin.cluster.snapshots.status;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -51,8 +52,9 @@
 /**
  * Snapshot status response
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class SnapshotsStatusResponse extends ActionResponse implements ToXContentObject {
 
     private final List<SnapshotStatus> snapshots;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java
index 7c937a1700db0..90a52f7406d57 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java
@@ -36,6 +36,7 @@
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -46,8 +47,9 @@
 /**
  * Transport request for obtaining cluster state
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClusterStateRequest extends ClusterManagerNodeReadRequest<ClusterStateRequest> implements IndicesRequest.Replaceable {
 
     public static final TimeValue DEFAULT_WAIT_FOR_NODE_TIMEOUT = TimeValue.timeValueMinutes(1);
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java
index b9bfeca9f7386..01a49c15fc1ec 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java
@@ -35,13 +35,15 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.unit.TimeValue;
 
 /**
  * Transport request builder for obtaining cluster state
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClusterStateRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder<
     ClusterStateRequest,
     ClusterStateResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java
index b947ad34a8cc2..d09105b2bd0a0 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java
@@ -35,6 +35,7 @@
 import org.opensearch.cluster.ClusterName;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.node.DiscoveryNodes;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -45,8 +46,9 @@
 /**
  * The response for getting the cluster state.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClusterStateResponse extends ActionResponse {
 
     private ClusterName clusterName;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java
index 3372448bdacae..b7054ae99361b 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java
@@ -35,6 +35,7 @@
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.MappingMetadata;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -61,8 +62,9 @@
 /**
  * Statistics about analysis usage.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public final class AnalysisStats implements ToXContentFragment, Writeable {
 
     /**
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java
index 63ac76ae65783..26e554f44fca1 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.cluster.stats;
 
 import org.opensearch.action.admin.indices.stats.CommonStats;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.xcontent.ToXContentFragment;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.cache.query.QueryCacheStats;
@@ -50,8 +51,9 @@
 /**
  * Cluster Stats per index
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClusterStatsIndices implements ToXContentFragment {
 
     private int indexCount;
@@ -180,8 +182,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     /**
      * Inner Shard Stats
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class ShardStats implements ToXContentFragment {
 
         int indices;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java
index 5689596763cef..b44e9cfc5c74a 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java
@@ -38,6 +38,7 @@
 import org.opensearch.action.admin.cluster.node.stats.NodeStats;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.node.DiscoveryNodeRole;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.metrics.OperationStats;
 import org.opensearch.common.network.NetworkModule;
 import org.opensearch.common.settings.Settings;
@@ -71,8 +72,9 @@
 /**
  * Per Node Cluster Stats
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClusterStatsNodes implements ToXContentFragment {
 
     private final Counts counts;
@@ -214,8 +216,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     /**
      * Inner Counts
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Counts implements ToXContentFragment {
         static final String COORDINATING_ONLY = "coordinating_only";
@@ -282,8 +285,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     /**
      * Inner Operating System Stats
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class OsStats implements ToXContentFragment {
         final int availableProcessors;
         final int allocatedProcessors;
@@ -395,8 +399,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     /**
      * Inner Process Stats
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class ProcessStats implements ToXContentFragment {
 
         final int count;
@@ -498,8 +503,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     /**
      * Inner JVM Stats
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class JvmStats implements ToXContentFragment {
 
         private final Map<JvmVersion, Integer> versions;
@@ -626,8 +632,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     /**
      * Inner JVM Version
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class JvmVersion {
         String version;
         String vmName;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java
index dc472c10f550b..6a99451c596ed 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.cluster.stats;
 
 import org.opensearch.action.support.nodes.BaseNodesRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -41,8 +42,9 @@
 /**
  * A request to get cluster level stats.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClusterStatsRequest extends BaseNodesRequest<ClusterStatsRequest> {
 
     public ClusterStatsRequest(StreamInput in) throws IOException {
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java
index aaf5e3aeffeb8..0dcb03dc26d0e 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java
@@ -34,12 +34,14 @@
 
 import org.opensearch.action.support.nodes.NodesOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for obtaining cluster stats
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClusterStatsRequestBuilder extends NodesOperationRequestBuilder<
     ClusterStatsRequest,
     ClusterStatsResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java
index 1b609b1f7556d..cc002b689a2a5 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java
@@ -37,6 +37,7 @@
 import org.opensearch.cluster.ClusterName;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.health.ClusterHealthStatus;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -50,8 +51,9 @@
 /**
  * Transport response for obtaining cluster stats
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResponse> implements ToXContentFragment {
 
     final ClusterStatsNodes nodesStats;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java
index f833c52493e00..f73c363b2ea60 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.admin.cluster.stats;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
@@ -44,8 +45,9 @@
 /**
  * Statistics about an index feature.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public final class IndexFeatureStats implements ToXContent, Writeable {
 
     final String name;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java
index 64218765a57e6..8e6fdb02b1f22 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java
@@ -35,6 +35,7 @@
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.MappingMetadata;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -58,8 +59,9 @@
 /**
  * Usage statistics about mappings usage.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public final class MappingStats implements ToXContentFragment, Writeable {
 
     /**
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java
index 7d92162015950..0bb4f3625ddad 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -44,8 +45,9 @@
 /**
  * Transport request for deleting stored scripts
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class DeleteStoredScriptRequest extends AcknowledgedRequest<DeleteStoredScriptRequest> {
 
     private String id;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java
index 34e0d429f2098..cbadde386d5a9 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java
@@ -35,12 +35,14 @@
 
 import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for deleting stored scripts
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class DeleteStoredScriptRequestBuilder extends AcknowledgedRequestBuilder<
     DeleteStoredScriptRequest,
     AcknowledgedResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java
index 70384b5fb648e..25bc3ecd6b7ee 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -44,8 +45,9 @@
 /**
  * Transport request for getting stored script
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetStoredScriptRequest extends ClusterManagerNodeReadRequest<GetStoredScriptRequest> {
 
     protected String id;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java
index ae969963be62f..ca0bd32f1f38b 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java
@@ -34,12 +34,14 @@
 
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for getting stored script
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetStoredScriptRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder<
     GetStoredScriptRequest,
     GetStoredScriptResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java
index cf5a3ec44e560..a81faff2abb03 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.admin.cluster.storedscripts;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.StatusToXContentObject;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.ActionResponse;
@@ -53,8 +54,9 @@
 /**
  * Transport response for getting stored script
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetStoredScriptResponse extends ActionResponse implements StatusToXContentObject {
 
     public static final ParseField _ID_PARSE_FIELD = new ParseField("_id");
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java
index f45bee955da02..8731b18fff338 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java
@@ -35,6 +35,7 @@
 import org.opensearch.Version;
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.XContentHelper;
 import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.Strings;
@@ -54,8 +55,9 @@
 /**
  * Transport request for putting stored script
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptRequest> implements ToXContentFragment {
 
     private String id;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java
index 2a06cd23c10b6..46773177e9a74 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java
@@ -35,14 +35,16 @@
 import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.xcontent.MediaType;
 
 /**
  * Transport request builder for putting stored script
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class PutStoredScriptRequestBuilder extends AcknowledgedRequestBuilder<
     PutStoredScriptRequest,
     AcknowledgedResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java
index 83e8b93b32e0f..16103f02be596 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 
 import java.io.IOException;
@@ -41,8 +42,9 @@
 /**
  * Transport request for getting pending cluster tasks
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class PendingClusterTasksRequest extends ClusterManagerNodeReadRequest<PendingClusterTasksRequest> {
 
     public PendingClusterTasksRequest() {}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java
index b5e77f291a701..c932c2e91f314 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java
@@ -34,12 +34,14 @@
 
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for getting pending cluster tasks
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class PendingClusterTasksRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder<
     PendingClusterTasksRequest,
     PendingClusterTasksResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java
index 118d2cf3065e6..9f4568c88b273 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.cluster.tasks;
 
 import org.opensearch.cluster.service.PendingClusterTask;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -46,8 +47,9 @@
 /**
  * Transport response for getting pending cluster tasks
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class PendingClusterTasksResponse extends ActionResponse implements Iterable<PendingClusterTask>, ToXContentObject {
 
     private final List<PendingClusterTask> pendingTasks;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java
index 94dbb5ff46a02..0b56216790d94 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.OpenSearchGenerationException;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.bytes.BytesArray;
@@ -54,8 +55,9 @@
 /**
  * Represents an alias, to be associated with an index
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class Alias implements Writeable, ToXContentFragment {
 
     private static final ParseField FILTER = new ParseField("filter");
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java
index 2899e791604a5..bc27c70282368 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java
@@ -38,6 +38,7 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.master.AcknowledgedRequest;
 import org.opensearch.cluster.metadata.AliasAction;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.ParsingException;
@@ -72,8 +73,9 @@
 /**
  * A request to add/remove aliases for one or more indices.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesRequest> implements ToXContentObject {
 
     private List<AliasActions> allAliasActions = new ArrayList<>();
@@ -95,8 +97,9 @@ public IndicesAliasesRequest() {}
     /**
      * Request to take one or more actions on one or more indexes and alias combinations.
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class AliasActions implements AliasesRequest, Writeable, ToXContentObject {
 
         private static final ParseField INDEX = new ParseField("index");
@@ -118,8 +121,9 @@ public static class AliasActions implements AliasesRequest, Writeable, ToXConten
         /**
          * The type of request.
          *
-         * @opensearch.internal
+         * @opensearch.api
          */
+        @PublicApi(since = "1.0.0")
        public enum Type {
             ADD((byte) 0, AliasActions.ADD),
             REMOVE((byte) 1, AliasActions.REMOVE),
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java
index 13c57cc781925..d262c9cd42ce9 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java
@@ -35,6 +35,7 @@
 import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.index.query.QueryBuilder;
 
 import java.util.Map;
@@ -42,8 +43,9 @@
 /**
  * Builder for request to modify many aliases at once.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<
     IndicesAliasesRequest,
     AcknowledgedResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java
index 28894baa629b3..00d754c8fb029 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java
@@ -35,6 +35,7 @@
 import org.opensearch.action.AliasesRequest;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -44,8 +45,9 @@
 /**
  * Transport request for listing index aliases
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetAliasesRequest extends ClusterManagerNodeReadRequest<GetAliasesRequest> implements AliasesRequest {
 
     private String[] indices = Strings.EMPTY_ARRAY;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java
index aecbd689a647c..e9a15e9f9dfb3 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java
@@ -33,12 +33,14 @@
 package org.opensearch.action.admin.indices.alias.get;
 
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for listing index aliases
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetAliasesRequestBuilder extends BaseAliasesRequestBuilder<GetAliasesResponse, GetAliasesRequestBuilder> {
 
     public GetAliasesRequestBuilder(OpenSearchClient client, GetAliasesAction action, String... aliases) {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java
index e50540e6b9aea..71cbbe2c6594f 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.alias.get;
 
 import org.opensearch.cluster.metadata.AliasMetadata;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -46,8 +47,9 @@
 /**
  * Transport response for listing index aliases
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetAliasesResponse extends ActionResponse {
 
     private final Map<String, List<AliasMetadata>> aliases;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeAction.java
index 274bb8bd15e08..a70f12bed4f1f 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeAction.java
@@ -35,6 +35,7 @@
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.ActionType;
 import org.opensearch.action.support.single.shard.SingleShardRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.Strings;
@@ -62,8 +63,9 @@
 /**
  * Transport action for analyzing text
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class AnalyzeAction extends ActionType<AnalyzeAction.Response> {
 
     public static final AnalyzeAction INSTANCE = new AnalyzeAction();
@@ -77,8 +79,9 @@ private AnalyzeAction() {
      * A request to analyze a text associated with a specific index. Allow to provide
      * the actual analyzer name to perform the analysis with.
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Request extends SingleShardRequest<Request> {
 
         private String[] text;
@@ -308,8 +311,9 @@ public static Request fromXContent(XContentParser parser, String index) throws I
     /**
      * Inner Response
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Response extends ActionResponse implements ToXContentObject {
 
         private final DetailAnalyzeResponse detail;
@@ -404,8 +408,9 @@ static final class Fields {
     /**
      * Inner Analyze Token
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class AnalyzeToken implements Writeable, ToXContentObject {
         private final String term;
         private final int startOffset;
@@ -542,8 +547,9 @@ public void writeTo(StreamOutput out) throws IOException {
     /**
      * Inner Detail Analyze Response
      *
-     * @opensearch.internal
+     * @opensearch.api
     */
+    @PublicApi(since = "1.0.0")
     public static class DetailAnalyzeResponse implements Writeable, ToXContentFragment {
 
         private final boolean customAnalyzer;
@@ -709,8 +715,9 @@ public void writeTo(StreamOutput out) throws IOException {
     /**
      * Inner Analyze Token List
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class AnalyzeTokenList implements Writeable, ToXContentObject {
         private final String name;
         private final AnalyzeToken[] tokens;
@@ -783,8 +790,9 @@ public void writeTo(StreamOutput out) throws IOException {
     /**
      * Inner character filtered text
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class CharFilteredText implements Writeable, ToXContentObject {
         private final String name;
         private final String[] texts;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java
index a7f21b2af16fc..b0240a4db82cd 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java
@@ -33,14 +33,16 @@
 
 import org.opensearch.action.support.single.shard.SingleShardOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 import java.util.Map;
 
 /**
  * Transport request builder for analyzing text
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class AnalyzeRequestBuilder extends SingleShardOperationRequestBuilder<
     AnalyzeAction.Request,
     AnalyzeAction.Response,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java
index 3b69a0980e97f..57266b5aec58f 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.Version;
 import org.opensearch.action.support.broadcast.BroadcastRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -43,8 +44,9 @@
 /**
  * Transport request for clearing cache
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClearIndicesCacheRequest extends BroadcastRequest<ClearIndicesCacheRequest> {
 
     private boolean queryCache = false;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java
index e0513e77a5aa5..074e2ce0b35eb 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java
@@ -34,12 +34,14 @@
 
 import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for clearing cache
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBuilder<
     ClearIndicesCacheRequest,
     ClearIndicesCacheResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java
index 0c6c5ca27b24d..6fe180d900311 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.cache.clear;
 
 import org.opensearch.action.support.broadcast.BroadcastResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.xcontent.ConstructingObjectParser;
@@ -45,8 +46,9 @@
 /**
  * The response of a clear cache action.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClearIndicesCacheResponse extends BroadcastResponse {
 
     private static final ConstructingObjectParser<ClearIndicesCacheResponse, Void> PARSER = new ConstructingObjectParser<>(
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java
index 0f98550343a13..e785c31c4a0b9 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java
@@ -37,6 +37,7 @@
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.util.CollectionUtils;
@@ -48,8 +49,9 @@
 /**
  * A request to close an index.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class CloseIndexRequest extends AcknowledgedRequest<CloseIndexRequest> implements IndicesRequest.Replaceable {
 
     private String[] indices;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java
index b3b53a0043c70..92c32c9ace490 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java
@@ -36,12 +36,14 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Builder for close index request
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class CloseIndexRequestBuilder extends AcknowledgedRequestBuilder<CloseIndexRequest, CloseIndexResponse, CloseIndexRequestBuilder> {
 
     public CloseIndexRequestBuilder(OpenSearchClient client, CloseIndexAction action) {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java
index df6d0e4c9be39..2e0c5cb5842b4 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java
@@ -34,6 +34,7 @@
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.support.master.ShardsAcknowledgedResponse;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -54,8 +55,9 @@
 /**
  * Transport response for closing an index
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class CloseIndexResponse extends ShardsAcknowledgedResponse {
 
     private final List<IndexResult> indices;
@@ -99,8 +101,9 @@ public String toString() {
     /**
      * Inner index result
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class IndexResult implements Writeable, ToXContentFragment {
 
         private final Index index;
@@ -199,8 +202,9 @@ public String toString() {
     /**
      * Shard Result from Close Index Response
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class ShardResult implements Writeable, ToXContentFragment {
 
         private final int id;
@@ -257,8 +261,9 @@ public String toString() {
     /**
      * Inner Failure if something goes wrong
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Failure extends DefaultShardOperationFailedException {
 
         private @Nullable String nodeId;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java
index d048a3008f588..e5dbefc3dba97 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java
@@ -42,6 +42,7 @@
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.xcontent.LoggingDeprecationHandler;
 import org.opensearch.common.xcontent.XContentFactory;
@@ -80,8 +81,9 @@
  * @see org.opensearch.client.Requests#createIndexRequest(String)
  * @see CreateIndexResponse
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest> implements IndicesRequest {
 
     public static final ParseField MAPPINGS = new ParseField("mappings");
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java
index 3493ee06827c1..b233f45422967 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java
@@ -36,6 +36,7 @@
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.xcontent.LoggingDeprecationHandler;
 import org.opensearch.common.xcontent.XContentType;
@@ -48,8 +49,9 @@
 /**
  * Builder for a create index request
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<
     CreateIndexRequest,
     CreateIndexResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java
index 1b3ad48402eed..3258ffd8672a1 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.create;
 
 import org.opensearch.action.support.master.ShardsAcknowledgedResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -49,8 +50,9 @@
 /**
  * A response for a create index action.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class CreateIndexResponse extends ShardsAcknowledgedResponse {
 
     private static final ParseField INDEX = new ParseField("index");
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java
index 0443325f82778..c5c03c93785d2 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -44,8 +45,9 @@
  * Represents a request to delete a particular dangling index, specified by its UUID. The {@link #acceptDataLoss}
  * flag must also be explicitly set to true, or later validation will fail.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class DeleteDanglingIndexRequest extends AcknowledgedRequest<DeleteDanglingIndexRequest> {
     private final String indexUUID;
     private final boolean acceptDataLoss;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java
index 590f08a82c1d2..2702b6a05c4bb 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -46,8 +47,9 @@
  * by its UUID. The {@link #acceptDataLoss} flag must also be
  * explicitly set to true, or later validation will fail.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ImportDanglingIndexRequest extends AcknowledgedRequest<ImportDanglingIndexRequest> {
     private final String indexUUID;
     private final boolean acceptDataLoss;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java
index adbfe9b760ed8..119c4acbf4160 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.dangling.list;
 
 import org.opensearch.action.support.nodes.BaseNodesRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -42,8 +43,9 @@
 /**
  * Transport request for listing a dangling indices
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ListDanglingIndicesRequest extends BaseNodesRequest<ListDanglingIndicesRequest> {
     /**
      * Filter the response by index UUID. Leave as null to find all indices.
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java
index 384ae2e028bba..be63bee6312fe 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java
@@ -36,6 +36,7 @@
 import org.opensearch.action.admin.indices.dangling.DanglingIndexInfo;
 import org.opensearch.action.support.nodes.BaseNodesResponse;
 import org.opensearch.cluster.ClusterName;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.StatusToXContentObject;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -58,8 +59,9 @@
  * information for each dangling index is presented under the "dangling_indices" key. If any nodes
  * in the cluster failed to answer, the details are presented under the "_nodes.failures" key.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ListDanglingIndicesResponse extends BaseNodesResponse<NodeListDanglingIndicesResponse> implements StatusToXContentObject {
 
     public ListDanglingIndicesResponse(StreamInput in) throws IOException {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java
index 14b29c4b2bbba..4c1690d25fbd9 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java
@@ -47,6 +47,7 @@
 import org.opensearch.cluster.metadata.MetadataCreateDataStreamService;
 import org.opensearch.cluster.metadata.MetadataCreateDataStreamService.CreateDataStreamClusterStateUpdateRequest;
 import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.inject.Inject;
 import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.common.Strings;
@@ -61,8 +62,9 @@
 /**
  * Transport action for creating a datastream
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class CreateDataStreamAction extends ActionType<AcknowledgedResponse> {
 
     public static final CreateDataStreamAction INSTANCE = new CreateDataStreamAction();
@@ -75,8 +77,9 @@ private CreateDataStreamAction() {
     /**
      * Request for Creating Data Stream
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Request extends AcknowledgedRequest<Request> implements IndicesRequest {
         private final String name;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java
index 190194e071bc4..6b0aec6a31839 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java
@@ -53,6 +53,7 @@
 import org.opensearch.cluster.service.ClusterManagerTaskThrottler;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.Priority;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.inject.Inject;
 import org.opensearch.common.regex.Regex;
 import org.opensearch.common.unit.TimeValue;
@@ -78,8 +79,9 @@
 /**
  * Transport action for deleting a datastream
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class DeleteDataStreamAction extends ActionType<AcknowledgedResponse> {
 
     private static final Logger logger = LogManager.getLogger(DeleteDataStreamAction.class);
@@ -94,8 +96,9 @@ private DeleteDataStreamAction() {
     /**
      * Request for deleting data streams
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Request extends ClusterManagerNodeRequest<Request> implements IndicesRequest.Replaceable {
         private String[] names;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java
index 6a7967d31653c..1db4e85887c23 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java
@@ -51,6 +51,7 @@
 import org.opensearch.cluster.metadata.MetadataIndexTemplateService;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.inject.Inject;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.ParseField;
@@ -76,8 +77,9 @@
 /**
  * Transport action for getting a datastream
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetDataStreamAction extends ActionType<GetDataStreamAction.Response> {
 
     public static final GetDataStreamAction INSTANCE = new GetDataStreamAction();
@@ -90,8 +92,9 @@ private GetDataStreamAction() {
     /**
      * Request for getting data streams
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Request extends ClusterManagerNodeReadRequest<Request> implements IndicesRequest.Replaceable {
         private String[] names;
@@ -156,16 +159,18 @@ public IndicesRequest indices(String... indices) {
     /**
      * Response for getting data streams
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Response extends ActionResponse implements ToXContentObject {
         public static final ParseField DATASTREAMS_FIELD = new ParseField("data_streams");
 
         /**
          * Data streams information
          *
-         * @opensearch.internal
+         * @opensearch.api
          */
+        @PublicApi(since = "1.0.0")
         public static class DataStreamInfo extends AbstractDiffable<DataStreamInfo> implements ToXContentObject {
             public static final ParseField STATUS_FIELD = new ParseField("status");
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java
index 006da6b3cbb09..5fbefbc6e1591 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java
@@ -36,6 +36,7 @@
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.util.CollectionUtils;
@@ -47,8 +48,9 @@
 /**
  * A request to delete an index. Best created with {@link org.opensearch.client.Requests#deleteIndexRequest(String)}.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class DeleteIndexRequest extends AcknowledgedRequest<DeleteIndexRequest> implements IndicesRequest.Replaceable {
 
     private String[] indices;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java
index 33f6342e94139..6cf0920f8570f 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java
@@ -36,12 +36,14 @@
 import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for deleting an index
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class DeleteIndexRequestBuilder extends AcknowledgedRequestBuilder<
     DeleteIndexRequest,
     AcknowledgedResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java
index f9bd849e549b7..91fde3ec62d7b 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java
@@ -36,6 +36,7 @@
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -47,8 +48,9 @@
 /**
  * Transport request for checking if an index exists
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndicesExistsRequest extends ClusterManagerNodeReadRequest<IndicesExistsRequest> implements IndicesRequest.Replaceable {
 
     private String[] indices = Strings.EMPTY_ARRAY;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java
index 8459bbd8b874e..2e0f28cb7e3f4 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java
@@ -34,12 +34,14 @@
 
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for checking if an index exists
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndicesExistsRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder<
     IndicesExistsRequest,
     IndicesExistsResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java
index 4ac85492229f0..a457cca74f897 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.admin.indices.exists.indices;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -41,8 +42,9 @@
 /**
  * Transport response for checking if an index exists
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndicesExistsResponse extends ActionResponse {
 
     private boolean exists;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java
index c8b28efc5f294..f8cf6ab72e038 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.broadcast.BroadcastRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 
@@ -52,8 +53,9 @@
  * @see org.opensearch.client.IndicesAdminClient#flush(FlushRequest)
  * @see FlushResponse
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class FlushRequest extends BroadcastRequest<FlushRequest> {
 
     private boolean force = false;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java
index d0cbd1d27fba6..50d7a78c919f1 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java
@@ -34,12 +34,14 @@
 
 import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for flushing one or more indices
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class FlushRequestBuilder extends BroadcastOperationRequestBuilder<FlushRequest, FlushResponse, FlushRequestBuilder> {
 
     public FlushRequestBuilder(OpenSearchClient client, FlushAction action) {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java
index 4135654e66271..3881a839a6dcd 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.flush;
 
 import org.opensearch.action.support.broadcast.BroadcastResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.xcontent.ConstructingObjectParser;
@@ -45,8 +46,9 @@
 /**
  * A response to flush action.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class FlushResponse extends BroadcastResponse {
 
     private static final ConstructingObjectParser<FlushResponse, Void> PARSER = new ConstructingObjectParser<>("flush", true, arg -> {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java
index 89e5a57094a96..f38b49f434261 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java
@@ -35,6 +35,7 @@
 import org.opensearch.Version;
 import org.opensearch.action.support.broadcast.BroadcastRequest;
 import org.opensearch.common.UUIDs;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.index.engine.Engine;
@@ -54,8 +55,9 @@
  * @see org.opensearch.client.IndicesAdminClient#forceMerge(ForceMergeRequest)
  * @see ForceMergeResponse
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ForceMergeRequest extends BroadcastRequest<ForceMergeRequest> {
 
     /**
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java
index cff05f194cac4..d8a618a1828ad 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * A request to force merge one or more indices. In order to force merge all
@@ -42,8 +43,9 @@
  * merge down to. By default, will cause the force merge process to merge down
  * to half the configured number of segments.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ForceMergeRequestBuilder extends BroadcastOperationRequestBuilder<
     ForceMergeRequest,
     ForceMergeResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java
index c5316f3375ad5..e6a7fe0025b87 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.forcemerge;
 
 import org.opensearch.action.support.broadcast.BroadcastResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.xcontent.ConstructingObjectParser;
@@ -47,8 +48,9 @@
 /**
  * A response for force merge action.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ForceMergeResponse extends BroadcastResponse {
 
     private static final ConstructingObjectParser<ForceMergeResponse, Void> PARSER = new ConstructingObjectParser<>(
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java
index 0b2084865e23e..47c59791edf04 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.master.info.ClusterInfoRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.util.ArrayUtils;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -43,14 +44,16 @@
 /**
  * A request to retrieve information about an index.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {
     /**
      * The features to get.
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public enum Feature {
         ALIASES((byte) 0),
         MAPPINGS((byte) 1),
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java
index 3019191e5570e..e97319abe5f98 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java
@@ -34,12 +34,14 @@
 
 import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder to get information about an index.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetIndexRequestBuilder extends ClusterInfoRequestBuilder<GetIndexRequest, GetIndexResponse, GetIndexRequestBuilder> {
 
     public GetIndexRequestBuilder(OpenSearchClient client, GetIndexAction action, String... indices) {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java
index c135b511c3315..5a237b8d3470f 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java
@@ -35,6 +35,7 @@
 import org.opensearch.Version;
 import org.opensearch.cluster.metadata.AliasMetadata;
 import org.opensearch.cluster.metadata.MappingMetadata;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.Strings;
@@ -57,8 +58,9 @@
 /**
  * A response for a get index action.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetIndexResponse extends ActionResponse implements ToXContentObject {
 
     private Map<String, MappingMetadata> mappings = Map.of();
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java
index 6a1d04efa9714..f18f973c07959 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java
@@ -37,6 +37,7 @@
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.IndicesOptions;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -50,8 +51,9 @@
  * Note: there is a new class with the same name for the Java HLRC that uses a typeless format.
  * Any changes done to this class should go to that client class as well.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetFieldMappingsRequest extends ActionRequest implements IndicesRequest.Replaceable {
 
     protected boolean local = false;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java
index ebc0c015c5140..d379bfbecafd0 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java
@@ -35,13 +35,15 @@
 import org.opensearch.action.ActionRequestBuilder;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.util.ArrayUtils;
 
 /**
  * A helper class to build {@link GetFieldMappingsRequest} objects
  *
- * @opensearch.internal
- **/
+ * @opensearch.api
+ */
+@PublicApi(since = "1.0.0")
 public class GetFieldMappingsRequestBuilder extends ActionRequestBuilder<GetFieldMappingsRequest, GetFieldMappingsResponse> {
 
     public GetFieldMappingsRequestBuilder(OpenSearchClient client, GetFieldMappingsAction action, String... indices) {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java
index 94c88e30295a8..86533f14e83e1 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.mapping.get;
 
 import org.opensearch.Version;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.XContentHelper;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.ActionResponse;
@@ -65,8 +66,9 @@
 * Note: there is a new class with the same name for the Java HLRC that uses a typeless format.
 * Any changes done to this class should go to that client class as well.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetFieldMappingsResponse extends ActionResponse implements ToXContentObject {
 
     private static final ParseField MAPPINGS = new ParseField("mappings");
@@ -178,8 +180,9 @@ private void addFieldMappingsToBuilder(XContentBuilder builder, Params params, M
     /**
      * Metadata for field mappings for toXContent
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class FieldMappingMetadata implements ToXContentFragment {
         private static final ParseField FULL_NAME = new ParseField("full_name");
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java
index 3988b0dd5a508..cd0ecdb30e5fa 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.master.info.ClusterInfoRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 
 import java.io.IOException;
@@ -41,8 +42,9 @@
 /**
  * Transport request to get field mappings.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetMappingsRequest extends ClusterInfoRequest<GetMappingsRequest> {
 
     public GetMappingsRequest() {}
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
index 85bf8c2ffd9c6..36ca1cb088cb5 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
@@ -34,12 +34,14 @@
 
 import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder to get field mappings.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetMappingsRequestBuilder extends ClusterInfoRequestBuilder<
     GetMappingsRequest,
     GetMappingsResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java
index e59f08c4fa162..56c979c20b6d9 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.Version;
 import org.opensearch.cluster.metadata.MappingMetadata;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.Strings;
@@ -52,8 +53,9 @@
 /**
  * Transport response to get field mappings.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetMappingsResponse extends ActionResponse implements ToXContentFragment {
 
     private static final ParseField MAPPINGS = new ParseField("mappings");
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java
index 32f751ceb1c5a..8122db3278795 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java
@@ -39,6 +39,7 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.master.AcknowledgedRequest;
 import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.common.xcontent.XContentHelper;
 import org.opensearch.core.common.Strings;
@@ -75,8 +76,9 @@
 * @see org.opensearch.client.IndicesAdminClient#putMapping(PutMappingRequest)
 * @see AcknowledgedResponse
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> implements IndicesRequest.Replaceable, ToXContentObject {
 
     private static final Set<String> RESERVED_FIELDS = Set.of(
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java
index 9d703bf428c8e..d44b243bb0edb 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java
@@ -36,6 +36,7 @@
 import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.index.Index;
 import org.opensearch.core.xcontent.MediaType;
 import org.opensearch.core.xcontent.XContentBuilder;
@@ -45,8 +46,9 @@
 /**
  * Builder for a put mapping request
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder<
     PutMappingRequest,
     AcknowledgedResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java
index 16451e311e7d3..f48ec1ae6fb71 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java
@@ -37,6 +37,7 @@
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.util.CollectionUtils;
@@ -49,8 +50,9 @@
 /**
  * A request to open an index.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class OpenIndexRequest extends AcknowledgedRequest<OpenIndexRequest> implements IndicesRequest.Replaceable {
 
     private String[] indices;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java
index bf09c3f173491..19770255b0ee1 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java
@@ -36,12 +36,14 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Builder for for open index request
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class OpenIndexRequestBuilder extends AcknowledgedRequestBuilder<OpenIndexRequest, OpenIndexResponse, OpenIndexRequestBuilder> {
 
     public OpenIndexRequestBuilder(OpenSearchClient client, OpenIndexAction action) {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java
index bd96a1071c129..78af1abc3ce31 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.open;
 
 import org.opensearch.action.support.master.ShardsAcknowledgedResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.xcontent.ConstructingObjectParser;
@@ -43,8 +44,9 @@
 /**
  * A response for a open index action.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class OpenIndexResponse extends ShardsAcknowledgedResponse {
 
     private static final ConstructingObjectParser<OpenIndexResponse, Void> PARSER = new ConstructingObjectParser<>(
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java
index b5097f96fe52b..1fb8514cbf48c 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java
@@ -37,6 +37,7 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.master.AcknowledgedRequest;
 import org.opensearch.cluster.metadata.IndexMetadata.APIBlock;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.util.CollectionUtils;
@@ -49,8 +50,9 @@
 /**
  * A request to add a block to an index.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class AddIndexBlockRequest extends AcknowledgedRequest<AddIndexBlockRequest> implements IndicesRequest.Replaceable {
 
     private final APIBlock block;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java
index 8322ba19f433e..ebcdf700d3b6e 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java
@@ -36,12 +36,14 @@
 import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.cluster.metadata.IndexMetadata.APIBlock;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Builder for add index block request
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class AddIndexBlockRequestBuilder extends AcknowledgedRequestBuilder<
     AddIndexBlockRequest,
     AddIndexBlockResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java
index 13cee3f8e0159..3ab64fa55af8b 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java
@@ -34,6 +34,7 @@
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.support.master.ShardsAcknowledgedResponse;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -54,8 +55,9 @@
 /**
  * Transport response to open an index.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class AddIndexBlockResponse extends ShardsAcknowledgedResponse {
 
     private final List<AddBlockResult> indices;
@@ -99,8 +101,9 @@ public String toString() {
     /**
      * Result for adding a block
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class AddBlockResult implements Writeable, ToXContentFragment {
 
         private final Index index;
@@ -199,8 +202,9 @@ public String toString() {
     /**
      * Per shard result for adding a block
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class AddBlockShardResult implements Writeable, ToXContentFragment {
 
         private final int id;
@@ -258,8 +262,9 @@ public String toString() {
     /**
      * Contains failure information
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Failure extends DefaultShardOperationFailedException {
         private @Nullable String nodeId;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java
index 69b20b697dd9a..aca98a6d9c571 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.broadcast.BroadcastRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -43,8 +44,9 @@
 /**
  * Request for recovery information
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class RecoveryRequest extends BroadcastRequest<RecoveryRequest> {
 
     private boolean detailed = false;       // Provides extra details in the response
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java
index 99a1fb430fb28..2f44a5f2df04a 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java
@@ -34,12 +34,14 @@
 
 import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Recovery information request builder.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class RecoveryRequestBuilder extends BroadcastOperationRequestBuilder<RecoveryRequest, RecoveryResponse, RecoveryRequestBuilder> {
 
     /**
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java
index e6440fd95aa39..27b6b334ef4b4 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.recovery;
 
 import org.opensearch.action.support.broadcast.BroadcastResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -48,8 +49,9 @@
 /**
  * Information regarding the recovery state of indices and their associated shards.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class RecoveryResponse extends BroadcastResponse {
 
     private final Map<String, List<RecoveryState>> shardRecoveryStates;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java
index c6e230cc66373..c8000cbc40da8 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.refresh;
 
 import org.opensearch.action.support.broadcast.BroadcastRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 
 import java.io.IOException;
@@ -46,8 +47,9 @@
 * @see org.opensearch.client.IndicesAdminClient#refresh(RefreshRequest)
 * @see RefreshResponse
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class RefreshRequest extends BroadcastRequest<RefreshRequest> {
 
     public RefreshRequest(String... indices) {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java
index 5b27ae13f24be..ebafc726bfd39 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java
@@ -34,14 +34,16 @@
 
 import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * A refresh request making all operations performed since the last refresh available for search. The (near) real-time
  * capabilities depends on the index engine used. For example, the internal one requires refresh to be called, but by
  * default a refresh is scheduled periodically.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class RefreshRequestBuilder extends BroadcastOperationRequestBuilder<RefreshRequest, RefreshResponse, RefreshRequestBuilder> {
 
     public RefreshRequestBuilder(OpenSearchClient client, RefreshAction action) {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java
index 4c89962dfec5e..30351b8983717 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.refresh;
 
 import org.opensearch.action.support.broadcast.BroadcastResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.xcontent.ConstructingObjectParser;
@@ -45,8 +46,9 @@
 /**
  * The response of a refresh action.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class RefreshResponse extends BroadcastResponse {
 
     private static final ConstructingObjectParser<RefreshResponse, Void> PARSER = new ConstructingObjectParser<>("refresh", true, arg -> {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java
index b82b68f6f9489..9a913c6bcafff 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java
@@ -10,6 +10,7 @@
 
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.broadcast.BroadcastRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -19,8 +20,9 @@
 /**
  * Request for Segment Replication stats information
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class SegmentReplicationStatsRequest extends BroadcastRequest<SegmentReplicationStatsRequest> {
     private boolean detailed = false;       // Provides extra details in the response
     private boolean activeOnly = false;     // Only reports on active segment replication events
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java
index 7e68d2ac59f07..9f00bff414cf5 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java
@@ -10,12 +10,14 @@
 
 import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Segment Replication stats information request builder.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class SegmentReplicationStatsRequestBuilder extends BroadcastOperationRequestBuilder<
     SegmentReplicationStatsRequest,
     SegmentReplicationStatsResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java
index f1b6b90e0cae1..e65e13a945abd 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java
@@ -9,6 +9,7 @@
 package org.opensearch.action.admin.indices.replication;
 
 import org.opensearch.action.support.broadcast.BroadcastResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -24,8 +25,9 @@
 /**
  * Stats Information regarding the Segment Replication state of indices and their associated shards.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class SegmentReplicationStatsResponse extends BroadcastResponse {
     private final Map<String, List<SegmentReplicationPerGroupStats>> replicationStats;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java
index 98c2446e17998..e20e4c2d868a8 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java
@@ -49,6 +49,7 @@
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.inject.Inject;
 import org.opensearch.common.util.concurrent.CountDown;
 import org.opensearch.core.ParseField;
@@ -82,8 +83,9 @@
 /**
  * Transport action to resolve an index.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ResolveIndexAction extends ActionType<ResolveIndexAction.Response> {
 
     public static final ResolveIndexAction INSTANCE = new ResolveIndexAction();
@@ -96,8 +98,9 @@ private ResolveIndexAction() {
     /**
      * Request for resolving an index
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Request extends ActionRequest implements IndicesRequest.Replaceable {
 
         public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpen();
@@ -196,8 +199,9 @@ public String getName() {
     /**
      * The resolved index
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
    public static class ResolvedIndex extends ResolvedIndexAbstraction implements Writeable, ToXContentObject {
 
         static final ParseField ALIASES_FIELD = new ParseField("aliases");
@@ -284,8 +288,9 @@ public int hashCode() {
     /**
      * The resolved index alias
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class ResolvedAlias extends ResolvedIndexAbstraction implements Writeable, ToXContentObject {
 
         static final ParseField INDICES_FIELD = new ParseField("indices");
@@ -346,8 +351,9 @@ public int hashCode() {
     /**
      * The resolved data stream
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class ResolvedDataStream extends ResolvedIndexAbstraction implements Writeable, ToXContentObject {
 
         static final ParseField BACKING_INDICES_FIELD = new ParseField("backing_indices");
@@ -418,8 +424,9 @@ public int hashCode() {
     /**
      * Response for resolving an index
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Response extends ActionResponse implements ToXContentObject {
 
         static final ParseField INDICES_FIELD = new ParseField("indices");
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java
index e014d6d703500..4d0b7fc8c13c7 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.rollover;
 
 import org.opensearch.Version;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.NamedWriteable;
 import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.core.xcontent.ToXContentFragment;
@@ -42,8 +43,9 @@
 /**
  * Base class for rollover request conditions
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public abstract class Condition<T> implements NamedWriteable, ToXContentFragment {
 
     protected T value;
@@ -96,8 +98,9 @@ public String name() {
     /**
      * Holder for index stats used to evaluate conditions
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Stats {
         public final long numDocs;
         public final long indexCreated;
@@ -113,8 +116,9 @@ public Stats(long numDocs, long indexCreated, ByteSizeValue indexSize) {
     /**
     * Holder for evaluated condition result
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Result {
         public final Condition<?> condition;
         public final boolean matched;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java
index 353cdbbbc840c..68c0076bbd302 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java
@@ -37,6 +37,7 @@
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -58,8 +59,9 @@
  * Note: there is a new class with the same name for the Java HLRC that uses a typeless format.
  * Any changes done to this class should also go to that client class.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class RolloverRequest extends AcknowledgedRequest<RolloverRequest> implements IndicesRequest {
 
     private static final ObjectParser<RolloverRequest, Void> PARSER = new ObjectParser<>("rollover");
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java
index 9e6b8518e92d3..acac7102edbc7 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java
@@ -35,6 +35,7 @@
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.core.common.unit.ByteSizeValue;
@@ -42,8 +43,9 @@
 /**
  * Transport request to rollover an index.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class RolloverRequestBuilder extends ClusterManagerNodeOperationRequestBuilder<
     RolloverRequest,
     RolloverResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java
index a66fcc9e9bcf2..b7df35cd480bb 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.rollover;
 
 import org.opensearch.action.support.master.ShardsAcknowledgedResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -55,8 +56,9 @@
  * Note: there is a new class with the same name for the Java HLRC that uses a typeless format.
  * Any changes done to this class should also go to that client class.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public final class RolloverResponse extends ShardsAcknowledgedResponse implements ToXContentObject {
 
     private static final ParseField NEW_INDEX = new ParseField("new_index");
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java
index 7249bc5e9d3ba..4b37da2c99850 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java
@@ -32,6 +32,8 @@
 
 package org.opensearch.action.admin.indices.segments;
 
+import org.opensearch.common.annotation.PublicApi;
+
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -41,8 +43,9 @@
 /**
  * List of Index Segments
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndexSegments implements Iterable<IndexShardSegments> {
 
     private final String index;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java
index a6caf0649fde1..8fdc050511050 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.admin.indices.segments;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.index.shard.ShardId;
 
 import java.util.Arrays;
@@ -40,8 +41,9 @@
 /**
  * List of Index Shard Segments
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndexShardSegments implements Iterable<ShardSegments> {
 
     private final ShardId shardId;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java
index 36f5979552011..648f58dada4f9 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java
@@ -37,6 +37,7 @@
 import org.apache.lucene.search.SortedNumericSortField;
 import org.apache.lucene.search.SortedSetSortField;
 import org.opensearch.action.support.broadcast.BroadcastResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -55,8 +56,9 @@
 /**
  * Transport response for retrieving indices segment information
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndicesSegmentResponse extends BroadcastResponse {
 
     private final ShardSegments[] shards;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java
index 03a41cd21572f..aff2b383df08f 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java
@@ -33,6 +33,7 @@
 
 package org.opensearch.action.admin.indices.segments;
 
 import org.opensearch.action.support.broadcast.BroadcastRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -42,8 +43,9 @@
 /**
  * Transport request for retrieving indices segment information
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndicesSegmentsRequest extends BroadcastRequest<IndicesSegmentsRequest> {
 
     protected boolean verbose = false;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java
index 4b758e1f4bfb1..579b6d997acd9 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java
@@ -34,12 +34,14 @@
 
 import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for retrieving indices segment information
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndicesSegmentsRequestBuilder extends BroadcastOperationRequestBuilder<
     IndicesSegmentsRequest,
     IndicesSegmentResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java
index a4dc901544e0b..84edec384b68a 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java
@@ -10,6 +10,7 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.broadcast.BroadcastRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -25,7 +26,10 @@
 /**
  * Transport request for retrieving PITs segment information
+ *
+ * @opensearch.api
  */
+@PublicApi(since = "2.3.0")
 public class PitSegmentsRequest extends BroadcastRequest<PitSegmentsRequest> {
     private boolean verbose = false;
     private final List<String> pitIds = new ArrayList<>();
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java
index 90317542244ff..09adda4d79108 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java
@@ -33,6 +33,7 @@
 
 package org.opensearch.action.admin.indices.segments;
 
 import org.opensearch.cluster.routing.ShardRouting;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
@@ -45,8 +46,9 @@
 /**
  * Collection of shard segments
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ShardSegments implements Writeable, Iterable<Segment> {
 
     private final ShardRouting shardRouting;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java
index 01383b6b6545d..547cfa8c3bce3 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java
@@ -37,6 +37,7 @@
 import org.opensearch.action.ValidateActions;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -48,8 +49,9 @@
 /**
  * Transport request for getting index segments
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetSettingsRequest extends ClusterManagerNodeReadRequest<GetSettingsRequest> implements IndicesRequest.Replaceable {
 
     private String[] indices = Strings.EMPTY_ARRAY;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java
index 84cd4e8682e93..5ba42c05dccf0 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java
@@ -35,13 +35,15 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.util.ArrayUtils;
 
 /**
  * Transport request builder for getting index segments
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetSettingsRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder<
     GetSettingsRequest,
     GetSettingsResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java
index 61c9b68629194..695c98684f0f4 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.admin.indices.settings.get;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.core.action.ActionResponse;
@@ -53,8 +54,9 @@
 /**
  * Transport response for getting index segments
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetSettingsResponse extends ActionResponse implements ToXContentObject {
 
     private final Map<String, Settings> indexToSettings;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java
index 43571dc8220f9..45172e313dfcc 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java
@@ -36,6 +36,7 @@
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -60,8 +61,9 @@
 /**
  * Request for an update index settings action
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsRequest>
     implements
         IndicesRequest.Replaceable,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java
index 7501f0c7798de..08d7a240aa007 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java
@@ -36,6 +36,7 @@
 import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.xcontent.XContentType;
 
@@ -44,8 +45,9 @@
 /**
  * Builder for an update index settings request
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class UpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder<
     UpdateSettingsRequest,
     AcknowledgedResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java
index bc9633f2bd2db..b986ef3c62e73 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java
@@ -37,12 +37,14 @@
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.cluster.health.ClusterHealthStatus;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Request builder for {@link IndicesShardStoresRequest}
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndicesShardStoreRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder<
     IndicesShardStoresRequest,
     IndicesShardStoresResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java
index d3261bea68f38..ea07325c35c3b 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java
@@ -36,6 +36,7 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
 import org.opensearch.cluster.health.ClusterHealthStatus;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -46,8 +47,9 @@
 /**
  * Request for {@link IndicesShardStoresAction}
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndicesShardStoresRequest extends ClusterManagerNodeReadRequest<IndicesShardStoresRequest>
     implements
         IndicesRequest.Replaceable {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java
index 98132b25a6d99..c2f373fcbd6a1 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.OpenSearchException;
 import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -53,15 +54,17 @@
  * Consists of {@link StoreStatus}s for requested indices grouped by
  * indices and shard ids and a list of encountered node {@link Failure}s
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndicesShardStoresResponse extends ActionResponse implements ToXContentFragment {
 
     /**
      * Shard store information from a node
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class StoreStatus implements Writeable, ToXContentFragment, Comparable<StoreStatus> {
         private final DiscoveryNode node;
         private final String allocationId;
@@ -70,7 +73,10 @@ public static class StoreStatus implements Writeable, ToXContentFragment, Compar
 
         /**
          * The status of the shard store with respect to the cluster
+         *
+         * @opensearch.api
          */
+        @PublicApi(since = "1.0.0")
         public enum AllocationStatus {
 
             /**
@@ -232,8 +238,9 @@ public int compareTo(StoreStatus other) {
 
     /**
      * Single node failure while retrieving shard store information
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Failure extends DefaultShardOperationFailedException {
         private String nodeId;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java
index a24a33006ac1e..a5225f2243876 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java
@@ -40,6 +40,7 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.master.AcknowledgedRequest;
 import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -57,8 +58,9 @@
 /**
  * Request class to shrink an index into a single shard
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements IndicesRequest, ToXContentObject {
 
     public static final ObjectParser<ResizeRequest, Void> PARSER = new ObjectParser<>("resize_request");
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java
index 855e678c77b9b..f9d90d46b0904 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java
@@ -36,14 +36,16 @@
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.common.unit.ByteSizeValue;
 
 /**
  * Transport request builder for resizing an index
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ResizeRequestBuilder extends AcknowledgedRequestBuilder<ResizeRequest, ResizeResponse, ResizeRequestBuilder> {
     public ResizeRequestBuilder(OpenSearchClient client, ActionType<ResizeResponse> action) {
         super(client, action, new ResizeRequest());
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java
index 1aa09023e3583..a4801f84c9ef9 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.shrink;
 
 import org.opensearch.action.admin.indices.create.CreateIndexResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.xcontent.ConstructingObjectParser;
 import org.opensearch.core.xcontent.XContentParser;
@@ -42,8 +43,9 @@
 /**
  * A response for a resize index action, either shrink or split index.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public final class ResizeResponse extends CreateIndexResponse {
 
     private static final ConstructingObjectParser<ResizeResponse, Void> PARSER = new ConstructingObjectParser<>(
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java
index 6403ed735ae49..91bcc0d62b1c6 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java
@@ -32,11 +32,14 @@
 
 package org.opensearch.action.admin.indices.shrink;
 
+import org.opensearch.common.annotation.PublicApi;
+
 /**
  * The type of the resize operation
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public enum ResizeType {
     SHRINK,
     SPLIT,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java
index e4abaef4ddfa8..8bfeb13b253c3 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java
@@ -34,6 +34,7 @@
 
 import org.apache.lucene.store.AlreadyClosedException;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
@@ -68,8 +69,9 @@
 /**
  * Common Stats for OpenSearch
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class CommonStats implements Writeable, ToXContentFragment {
 
     @Nullable
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java
index 7503020d1c8ef..a7d9f95b80f7b 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.stats;
 
 import org.opensearch.Version;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -45,8 +46,9 @@
 /**
  * Common Stats Flags for OpenSearch
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class CommonStatsFlags implements Writeable, Cloneable {
 
     public static final CommonStatsFlags ALL = new CommonStatsFlags().all();
@@ -255,8 +257,9 @@ public CommonStatsFlags clone() {
     /**
      * The flags.
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public enum Flag {
         Store("store", 0),
         Indexing("indexing", 1),
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java
index 1635ce0bf83fc..b0143d9491087 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.admin.indices.stats;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
@@ -44,8 +45,9 @@
 /**
  * IndexShardStats for OpenSearch
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndexShardStats implements Iterable<ShardStats>, Writeable {
 
     private final ShardId shardId;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java
index 1c57ca39576b0..09614ea801193 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java
@@ -32,6 +32,8 @@
 
 package org.opensearch.action.admin.indices.stats;
 
+import org.opensearch.common.annotation.PublicApi;
+
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -41,8 +43,9 @@
 /**
  * Index Stats for OpenSearch
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndexStats implements Iterable<IndexShardStats> {
 
     private final String index;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java
index 54f3e9b7d1a24..2b64464a76899 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.stats;
 
 import org.opensearch.action.support.broadcast.BroadcastRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 
@@ -46,8 +47,9 @@
  * All the stats to be returned can be cleared using {@link #clear()}, at which point, specific
  * stats can be enabled.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> {
 
     private CommonStatsFlags flags = new CommonStatsFlags();
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java
index c211812b32c48..acc085a96a896 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.unit.TimeValue;
 
 /**
@@ -45,8 +46,9 @@
  * All the stats to be returned can be cleared using {@link #clear()}, at which point, specific
  * stats can be enabled.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder<
     IndicesStatsRequest,
     IndicesStatsResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java
index 6f051fa19c99f..6242081cd2371 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java
@@ -35,6 +35,7 @@
 import org.opensearch.action.admin.indices.stats.IndexStats.IndexStatsBuilder;
 import org.opensearch.action.support.broadcast.BroadcastResponse;
 import org.opensearch.cluster.routing.ShardRouting;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -54,8 +55,9 @@
 /**
  * Transport response for retrieving indices stats
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndicesStatsResponse extends BroadcastResponse {
 
     private ShardStats[] shards;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java
index 77562fa19b319..4ed1ce95b7de2 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.cluster.routing.ShardRouting;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
@@ -49,8 +50,9 @@
 /**
  * Shard Stats for OpenSearch
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ShardStats implements Writeable, ToXContentFragment {
 
     private ShardRouting shardRouting;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java
index 85524bddc56d8..e93a428b0af26 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java
@@ -33,6 +33,7 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 
@@ -43,8 +44,9 @@
 /**
  * A request to delete an index template.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class DeleteIndexTemplateRequest extends ClusterManagerNodeRequest<DeleteIndexTemplateRequest> {
 
     private String name;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java
index 4a990b7837120..60771cfa453ae 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java
@@ -34,12 +34,14 @@
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder;
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for deleting an index template
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class DeleteIndexTemplateRequestBuilder extends ClusterManagerNodeOperationRequestBuilder<
     DeleteIndexTemplateRequest,
     AcknowledgedResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java
index 18142eb7b787d..fa45efdc53124 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java
@@ -33,6 +33,7 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -44,8 +45,9 @@
 /**
  * Request that allows to retrieve index templates
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetIndexTemplatesRequest extends ClusterManagerNodeReadRequest<GetIndexTemplatesRequest> {
 
     private String[] names;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java
index 09de1733239fc..f8c02b4c8be08 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java
@@ -33,12 +33,14 @@
 
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Request builder to retrieve one or more Index templates
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetIndexTemplatesRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder<
     GetIndexTemplatesRequest,
     GetIndexTemplatesResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java
index 761a345b49538..009a73b615e0c 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java
@@ -32,6 +32,7 @@
 package org.opensearch.action.admin.indices.template.get;
 
 import org.opensearch.cluster.metadata.IndexTemplateMetadata;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -49,8 +50,9 @@
 /**
  * Response for retrieving one or more Index templates
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetIndexTemplatesResponse extends ActionResponse implements ToXContentObject {
 
     private final List<IndexTemplateMetadata> indexTemplates;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java
index 028275778538e..c4396f22d7c16 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java
@@ -36,6 +36,7 @@
 import org.opensearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -47,8 +48,9 @@
  * Transport Request for handling simulating an index template either by name (looking it up in the
  * cluster state), or by a provided template configuration
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class SimulateIndexTemplateRequest extends ClusterManagerNodeReadRequest<SimulateIndexTemplateRequest> {
 
     private String indexName;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
index 8a370a6845db6..c6a9d3530a8cc 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.cluster.metadata.Template;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -50,8 +51,9 @@
 /**
  * Contains the information on what V2 templates would match a given index.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class SimulateIndexTemplateResponse extends ActionResponse implements ToXContentObject {
 
     private static final ParseField TEMPLATE = new ParseField("template");
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java
index 1979cc39e32fb..ed209e18b64ef 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java
@@ -41,6 +41,7 @@
 import org.opensearch.cluster.metadata.ComposableIndexTemplate;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.regex.Regex;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -54,8 +55,9 @@
 /**
  * An action for putting a composable template into the cluster state
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class PutComposableIndexTemplateAction extends ActionType<AcknowledgedResponse> {
 
     public static final PutComposableIndexTemplateAction INSTANCE = new PutComposableIndexTemplateAction();
@@ -68,8 +70,9 @@ private PutComposableIndexTemplateAction() {
     /**
      * A request for putting a single index template into the cluster state
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Request extends ClusterManagerNodeRequest<Request> implements IndicesRequest {
         private final String name;
         @Nullable
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
index 4f7a28a03e6fb..d4e9200508bfa 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
@@ -41,6 +41,7 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.xcontent.LoggingDeprecationHandler;
@@ -78,8 +79,9 @@
 /**
  * A request to create an index template.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class PutIndexTemplateRequest extends ClusterManagerNodeRequest<PutIndexTemplateRequest>
     implements
         IndicesRequest,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java
index 04d2236e00e8f..931d12de574ae 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java
@@ -35,6 +35,7 @@
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder;
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.bytes.BytesReference;
@@ -47,8 +48,9 @@
 /**
  * A request builder for putting an index template into the cluster state
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class PutIndexTemplateRequestBuilder extends ClusterManagerNodeOperationRequestBuilder<
     PutIndexTemplateRequest,
     AcknowledgedResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java
index 32cf6d4bfe70d..e760067e9f5be 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.admin.indices.upgrade.get;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.index.shard.ShardId;
 
 import java.util.Arrays;
@@ -40,8 +41,9 @@
 /**
  * Status for an Index Shard Upgrade
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndexShardUpgradeStatus implements Iterable<ShardUpgradeStatus> {
 
     private final ShardId shardId;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java
index 4513a321e2a51..fd8ddc1293aaf 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java
@@ -32,6 +32,8 @@
 
 package org.opensearch.action.admin.indices.upgrade.get;
 
+import org.opensearch.common.annotation.PublicApi;
+
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -41,8 +43,9 @@
 /**
  * Status for an Index Upgrade
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndexUpgradeStatus implements Iterable<IndexShardUpgradeStatus> {
 
     private final String index;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java
index 57fb2513faf78..783b44ba6570d 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.support.broadcast.BroadcastShardResponse;
 import org.opensearch.cluster.routing.ShardRouting;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 
@@ -42,8 +43,9 @@
 /**
  * Status for a Shard Upgrade
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ShardUpgradeStatus extends BroadcastShardResponse {
 
     private ShardRouting shardRouting;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java
index 987661d2129ac..2584ab6b370da 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.upgrade.get;
 
 import org.opensearch.action.support.broadcast.BroadcastRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 
@@ -41,8 +42,9 @@
 /**
  * Transport Request for retrieving status of upgrading an Index
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class UpgradeStatusRequest extends BroadcastRequest<UpgradeStatusRequest> {
 
     public UpgradeStatusRequest() {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java
index c698c38fe12d5..ac5f881c35dc5 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java
@@ -34,12 +34,14 @@
 
 import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport Request Builder for retrieving status of upgrading an Index
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class UpgradeStatusRequestBuilder extends BroadcastOperationRequestBuilder<
     UpgradeStatusRequest,
     UpgradeStatusResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java
index 20dbd12287231..ba2915ee4ddf1 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.upgrade.get;
 
 import org.opensearch.action.support.broadcast.BroadcastResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -50,8 +51,9 @@
 /**
  * Transport Response for retrieving status of upgrading an Index
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class UpgradeStatusResponse extends BroadcastResponse {
 
     private ShardUpgradeStatus[] shards;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java
index 98c307c37ea54..4df02ad7fa8c0 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.upgrade.post;
 
 import org.opensearch.action.support.broadcast.BroadcastRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 
@@ -45,8 +46,9 @@
  * @see org.opensearch.client.IndicesAdminClient#upgrade(UpgradeRequest)
  * @see UpgradeResponse
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class UpgradeRequest extends BroadcastRequest<UpgradeRequest> {
 
     /**
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java
index 8203f9d51b8e4..bf316504dc920 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java
@@ -34,13 +34,15 @@
 
 import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * A request to upgrade one or more indices. In order to optimize on all the indices, pass an empty array or
  * {@code null} for the indices.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class UpgradeRequestBuilder extends BroadcastOperationRequestBuilder<UpgradeRequest, UpgradeResponse, UpgradeRequestBuilder> {
 
     public UpgradeRequestBuilder(OpenSearchClient client, UpgradeAction action) {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java
index d08cc65832f8f..4bdf41dabba37 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.Version;
 import org.opensearch.action.support.broadcast.BroadcastResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -47,8 +48,9 @@
 /**
  * A response for the upgrade action.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class UpgradeResponse extends BroadcastResponse {
 
     private final Map<String, Tuple<Version, org.apache.lucene.util.Version>> versions;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java
index 248cab5e40eaf..84aa687af0fe1 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.admin.indices.validate.query;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -50,8 +51,9 @@
 /**
  * Query Explanation
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class QueryExplanation implements Writeable, ToXContentFragment {
 
     public static final String INDEX_FIELD = "index";
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java
index ef0d14502af23..94bec696dd2c1 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java
@@ -37,6 +37,7 @@
 import org.opensearch.action.ValidateActions;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.broadcast.BroadcastRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -53,8 +54,9 @@
  * <p>
  * The request requires the query to be set using {@link #query(QueryBuilder)}
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest> implements ToXContentObject {
 
     private QueryBuilder query = new MatchAllQueryBuilder();
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java
index 6209f41d88be2..de4cf5ae2b904 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java
@@ -34,13 +34,15 @@
 
 import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.index.query.QueryBuilder;
 
 /**
  * Transport Request Builder to Validate a Query
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ValidateQueryRequestBuilder extends BroadcastOperationRequestBuilder<
     ValidateQueryRequest,
     ValidateQueryResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java
index 44d6d637cdc51..791128491f33d 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.indices.validate.query;
 
 import org.opensearch.action.support.broadcast.BroadcastResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -52,8 +53,9 @@
 /**
  * The response of the validate action.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ValidateQueryResponse extends BroadcastResponse {
 
     public static final String VALID_FIELD = "valid";
diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java
index 20dea872e0ad2..81e8940f1b505 100644
--- a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java
+++ b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java
@@ -41,6 +41,7 @@
 import org.opensearch.action.index.IndexResponse;
 import org.opensearch.action.update.UpdateResponse;
 import org.opensearch.common.CheckedConsumer;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.StatusToXContentObject;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.Strings;
@@ -68,8 +69,9 @@
  * Represents a single item response for an action executed as part of the bulk API. Holds the index/type/id
  * of the relevant action, and if it has failed or not (with the failure message in case it failed).
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class BulkItemResponse implements Writeable, StatusToXContentObject {
 
     private static final String _INDEX = "_index";
@@ -179,8 +181,9 @@ public static BulkItemResponse fromXContent(XContentParser parser, int id) throw
     /**
      * Represents a failure.
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Failure implements Writeable, ToXContentFragment {
         public static final String INDEX_FIELD = "index";
         public static final String ID_FIELD = "id";
diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java
index f2f3077001a13..47abd0337fcf9 100644
--- a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java
+++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java
@@ -45,6 +45,7 @@
 import org.opensearch.action.support.replication.ReplicationRequest;
 import org.opensearch.action.update.UpdateRequest;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.bytes.BytesArray;
@@ -71,8 +72,9 @@
  * Note that we only support refresh on the bulk request not per item.
  * @see org.opensearch.client.Client#bulk(BulkRequest)
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class BulkRequest extends ActionRequest implements CompositeIndicesRequest, WriteRequest<BulkRequest>, Accountable {
 
     private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(BulkRequest.class);
diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java
index 08eee82a53cf9..a165d186d3878 100644
--- a/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java
@@ -44,6 +44,7 @@
 import org.opensearch.action.update.UpdateRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.xcontent.MediaType;
@@ -52,8 +53,9 @@
  * A bulk request holds an ordered {@link IndexRequest}s and {@link DeleteRequest}s and allows to executes
  * it in a single batch.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class BulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkResponse> implements WriteRequestBuilder<BulkRequestBuilder> {
 
     public BulkRequestBuilder(OpenSearchClient client, BulkAction action, @Nullable String globalIndex) {
diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java
index 655e65bc22138..6b70e2acd41d2 100644
--- a/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java
+++ b/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.bulk;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.xcontent.StatusToXContentObject;
 import org.opensearch.core.action.ActionResponse;
@@ -56,8 +57,9 @@
  * bulk requests. Each item holds the index/type/id is operated on, and if it failed or not (with the
  * failure message).
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class BulkResponse extends ActionResponse implements Iterable<BulkItemResponse>, StatusToXContentObject {
 
     private static final String ITEMS = "items";
diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java
index 9a9e861ad8055..d37e049d44720 100644
--- a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java
+++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java
@@ -39,6 +39,7 @@
 import org.opensearch.action.DocWriteRequest;
 import org.opensearch.action.support.replication.ReplicatedWriteRequest;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.lucene.uid.Versions;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -64,8 +65,9 @@
 * @see org.opensearch.client.Client#delete(DeleteRequest)
 * @see org.opensearch.client.Requests#deleteRequest(String)
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest>
     implements
         DocWriteRequest<DeleteRequest>,
diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java
index 0436962ce01d2..66aa1c73042fe 100644
--- a/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java
@@ -36,13 +36,15 @@
 import org.opensearch.action.support.replication.ReplicationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.index.VersionType;
 
 /**
  * A delete document action request builder.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class DeleteRequestBuilder extends ReplicationRequestBuilder<DeleteRequest, DeleteResponse, DeleteRequestBuilder>
     implements
         WriteRequestBuilder<DeleteRequestBuilder> {
diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java b/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java
index e4d44197a8885..c39a787ad763d 100644
--- a/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java
+++ b/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.delete;
 
 import org.opensearch.action.DocWriteResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.core.rest.RestStatus;
@@ -48,8 +49,9 @@
 * @see org.opensearch.action.delete.DeleteRequest
 * @see org.opensearch.client.Client#delete(DeleteRequest)
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class DeleteResponse extends DocWriteResponse {
 
     public DeleteResponse(ShardId shardId, StreamInput in) throws IOException {
@@ -112,8 +114,9 @@ public static void parseXContentFields(XContentParser parser, Builder context) t
      * temporarily store the parsed values, then the {@link DocWriteResponse.Builder#build()} method is called to
      * instantiate the {@link DeleteResponse}.
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
    public static class Builder extends DocWriteResponse.Builder {
 
         @Override
diff --git a/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java b/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java
index aad5a9853a3c5..dd9c01b04acbb 100644
--- a/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java
+++ b/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java
@@ -36,6 +36,7 @@
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.ValidateActions;
 import org.opensearch.action.support.single.shard.SingleShardRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -54,8 +55,9 @@
 /**
  * Explain request encapsulating the explain query and document identifier to get an explanation for.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ExplainRequest extends SingleShardRequest<ExplainRequest> implements ToXContentObject {
 
     private static final ParseField QUERY_FIELD = new ParseField("query");
diff --git a/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java b/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java
index 3031cb6067469..681b48f7a6593 100644
--- a/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java
@@ -35,6 +35,7 @@
 import org.opensearch.action.support.single.shard.SingleShardOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.index.query.QueryBuilder;
 import org.opensearch.search.fetch.subphase.FetchSourceContext;
@@ -42,8 +43,9 @@
 /**
  * A builder for {@link ExplainRequest}.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ExplainRequestBuilder extends SingleShardOperationRequestBuilder<ExplainRequest, ExplainResponse, ExplainRequestBuilder> {
 
     ExplainRequestBuilder(OpenSearchClient client, ExplainAction action) {
diff --git a/server/src/main/java/org/opensearch/action/explain/ExplainResponse.java b/server/src/main/java/org/opensearch/action/explain/ExplainResponse.java
index 895f87b83b6ff..80a8634e62d87 100644
--- a/server/src/main/java/org/opensearch/action/explain/ExplainResponse.java
+++ b/server/src/main/java/org/opensearch/action/explain/ExplainResponse.java
@@ -34,6 +34,7 @@
 
 import org.apache.lucene.search.Explanation;
 import org.opensearch.Version;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.StatusToXContentObject;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.ActionResponse;
@@ -56,8 +57,9 @@
 /**
  * Response containing the score explanation.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ExplainResponse extends ActionResponse implements StatusToXContentObject { private static final ParseField _INDEX = new ParseField("_index"); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java index 9b01f65f0f711..20b9789972fff 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java @@ -32,6 +32,7 @@ package org.opensearch.action.fieldcaps; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -60,8 +61,9 @@ /** * Describes the capabilities of a field optionally merged across multiple indices. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FieldCapabilities implements Writeable, ToXContentObject { private static final ParseField TYPE_FIELD = new ParseField("type"); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java index c1c54c7018bcf..71e8db73e4ecc 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.ValidateActions; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -52,7 +53,10 @@ /** * Transport request for retrieving field capabilities for an explicit list of fields + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class FieldCapabilitiesRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContentObject { public static final String NAME = "field_caps_request"; diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java index 70a90b98bdf25..c589d344089f3 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.query.QueryBuilder; /** * Transport request builder for retrieving field capabilities * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FieldCapabilitiesRequestBuilder extends ActionRequestBuilder { public FieldCapabilitiesRequestBuilder(OpenSearchClient client, FieldCapabilitiesAction action, String... 
indices) { super(client, action, new FieldCapabilitiesRequest().indices(indices)); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java index 2dcda2bf56b8f..72fdc75686e3b 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.fieldcaps; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; @@ -57,8 +58,10 @@ /** * Response for {@link FieldCapabilitiesRequest} requests. * - * @opensearch.internal + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FieldCapabilitiesResponse extends ActionResponse implements ToXContentObject { private static final ParseField INDICES_FIELD = new ParseField("indices"); private static final ParseField FIELDS_FIELD = new ParseField("fields"); diff --git a/server/src/main/java/org/opensearch/action/get/GetRequest.java b/server/src/main/java/org/opensearch/action/get/GetRequest.java index 62d85d6d8f352..952fa8bdab63a 100644 --- a/server/src/main/java/org/opensearch/action/get/GetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/GetRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.RealtimeRequest; import org.opensearch.action.ValidateActions; import org.opensearch.action.support.single.shard.SingleShardRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -60,8 +61,9 @@ * @see org.opensearch.client.Requests#getRequest(String) * @see org.opensearch.client.Client#get(GetRequest) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetRequest extends SingleShardRequest implements RealtimeRequest { private String id; diff --git a/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java b/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java index 6237cf73f0ca8..f50cbb16186f7 100644 --- a/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.single.shard.SingleShardOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.index.VersionType; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -42,8 +43,9 @@ /** * A get document action request builder. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetRequestBuilder extends SingleShardOperationRequestBuilder { public GetRequestBuilder(OpenSearchClient client, GetAction action) { diff --git a/server/src/main/java/org/opensearch/action/get/GetResponse.java b/server/src/main/java/org/opensearch/action/get/GetResponse.java index c86128444d7eb..f7f7241933bd6 100644 --- a/server/src/main/java/org/opensearch/action/get/GetResponse.java +++ b/server/src/main/java/org/opensearch/action/get/GetResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.get; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.document.DocumentField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.ParsingException; @@ -58,8 +59,9 @@ * @see GetRequest * @see org.opensearch.client.Client#get(GetRequest) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetResponse extends ActionResponse implements Iterable, ToXContentObject { GetResult getResult; diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java b/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java index 19c9b785e7ea2..09b4205ffe521 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.get; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -41,8 +42,9 @@ /** * A single multi get response. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiGetItemResponse implements Writeable { private final GetResponse response; diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java index 7f0844d717488..b15c69a41972f 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java @@ -42,6 +42,7 @@ import org.opensearch.action.ValidateActions; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -69,8 +70,9 @@ /** * Transport request for a multi get. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiGetRequest extends ActionRequest implements Iterable, @@ -91,8 +93,9 @@ public class MultiGetRequest extends ActionRequest /** * A single get item. 
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Item implements Writeable, IndicesRequest, ToXContentObject {

         private String index;
diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java
index c317edc07da8b..0b701c8ec11c7 100644
--- a/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java
@@ -34,12 +34,14 @@

 import org.opensearch.action.ActionRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;

 /**
  * A multi get document action request builder.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class MultiGetRequestBuilder extends ActionRequestBuilder<MultiGetRequest, MultiGetResponse> {

     public MultiGetRequestBuilder(OpenSearchClient client, MultiGetAction action) {
diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java b/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java
index 73372b85be3b2..3c3489a065d9a 100644
--- a/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java
+++ b/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java
@@ -34,6 +34,7 @@

 import org.opensearch.OpenSearchException;
 import org.opensearch.Version;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -55,8 +56,9 @@
 /**
  * Transport response for a multi get.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class MultiGetResponse extends ActionResponse implements Iterable<MultiGetItemResponse>, ToXContentObject {

     private static final ParseField INDEX = new ParseField("_index");
@@ -71,8 +73,9 @@ public class MultiGetResponse extends ActionResponse implements Iterable
 public class IndexRequest extends ReplicatedWriteRequest<IndexRequest>
     implements
         DocWriteRequest<IndexRequest>,
         CompositeIndicesRequest {

     private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(IndexRequest.class);
diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java
index 4bf7634dcb7e1..9d4ad3c32778c 100644
--- a/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java
@@ -37,6 +37,7 @@
 import org.opensearch.action.support.replication.ReplicationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.xcontent.MediaType;
 import org.opensearch.core.xcontent.XContentBuilder;
@@ -47,8 +48,9 @@
 /**
  * An index document action request builder.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest, IndexResponse, IndexRequestBuilder>
     implements
         WriteRequestBuilder<IndexRequestBuilder> {
diff --git a/server/src/main/java/org/opensearch/action/index/IndexResponse.java b/server/src/main/java/org/opensearch/action/index/IndexResponse.java
index d0aa9b57b6528..53f832fc12c43 100644
--- a/server/src/main/java/org/opensearch/action/index/IndexResponse.java
+++ b/server/src/main/java/org/opensearch/action/index/IndexResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.index;

 import org.opensearch.action.DocWriteResponse;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.index.shard.ShardId;
@@ -50,8 +51,9 @@
  * @see IndexRequest
  * @see org.opensearch.client.Client#index(IndexRequest)
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class IndexResponse extends DocWriteResponse {

     public IndexResponse(ShardId shardId, StreamInput in) throws IOException {
@@ -116,8 +118,9 @@ public static void parseXContentFields(XContentParser parser, Builder context) t
      * temporarily store the parsed values, then the {@link Builder#build()} method is called to
      * instantiate the {@link IndexResponse}.
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Builder extends DocWriteResponse.Builder {
         @Override
         public IndexResponse build() {
diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java
index 2f05ce3a25320..b9d916e152c3d 100644
--- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java
+++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java
@@ -34,6 +34,7 @@

 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;

@@ -43,8 +44,9 @@
 /**
  * transport request to delete a pipeline
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class DeletePipelineRequest extends AcknowledgedRequest<DeletePipelineRequest> {

     private String id;
diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java
index 6a2eb494e8d3f..bc253db85bb0f 100644
--- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java
@@ -35,12 +35,14 @@
 import org.opensearch.action.ActionRequestBuilder;
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;

 /**
  * Transport request builder to delete a pipeline
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class DeletePipelineRequestBuilder extends ActionRequestBuilder<DeletePipelineRequest, AcknowledgedResponse> {

     public DeletePipelineRequestBuilder(OpenSearchClient client, DeletePipelineAction action) {
diff --git a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java
index 4bae98098777d..c7266c31a5022 100644
--- a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java
+++ b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java
@@ -34,6 +34,7 @@

 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -43,8 +44,9 @@
 /**
  * transport request to get a pipeline
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetPipelineRequest extends ClusterManagerNodeReadRequest<GetPipelineRequest> {

     private String[] ids;
diff --git a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java
index bdc13523ffdc6..593ea2156d5e3 100644
--- a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java
@@ -34,12 +34,14 @@

 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;

 /**
  * Transport request builder to get a pipeline
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetPipelineRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder<
     GetPipelineRequest,
     GetPipelineResponse,
diff --git a/server/src/main/java/org/opensearch/action/ingest/GetPipelineResponse.java b/server/src/main/java/org/opensearch/action/ingest/GetPipelineResponse.java
index bd1b18be9a828..0719842bc985f 100644
--- a/server/src/main/java/org/opensearch/action/ingest/GetPipelineResponse.java
+++ b/server/src/main/java/org/opensearch/action/ingest/GetPipelineResponse.java
@@ -32,6 +32,7 @@

 package org.opensearch.action.ingest;

+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.StatusToXContentObject;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.Strings;
@@ -57,8 +58,9 @@
 /**
  * transport response for getting a pipeline
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class GetPipelineResponse extends ActionResponse implements StatusToXContentObject {

     private List<PipelineConfiguration> pipelines;
diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java
index f764e4b23860a..06e89b5f2908b 100644
--- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java
+++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java
@@ -35,6 +35,7 @@
 import org.opensearch.Version;
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -49,8 +50,9 @@
 /**
  * transport request to put a pipeline
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class PutPipelineRequest extends AcknowledgedRequest<PutPipelineRequest> implements ToXContentObject {

     private String id;
diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java
index e734abb6d7969..e8d6a4d332319 100644
--- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java
@@ -35,14 +35,16 @@
 import org.opensearch.action.ActionRequestBuilder;
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.xcontent.MediaType;

 /**
  * Transport request builder to put a pipeline
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class PutPipelineRequestBuilder extends ActionRequestBuilder<PutPipelineRequest, AcknowledgedResponse> {

     public PutPipelineRequestBuilder(OpenSearchClient client, PutPipelineAction action) {
diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentResult.java b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentResult.java
index 98a03272aff42..bc338a57f762d 100644
--- a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentResult.java
+++ b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentResult.java
@@ -31,14 +31,16 @@

 package org.opensearch.action.ingest;

+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.core.xcontent.ToXContentObject;

 /**
  * Interface to simulate a document result
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public interface SimulateDocumentResult extends Writeable, ToXContentObject {

 }
diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java
index ec3ee981b646f..b51f25d2e62b1 100644
--- a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java
+++ b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java
@@ -35,6 +35,7 @@
 import org.opensearch.Version;
 import org.opensearch.action.ActionRequest;
 import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.bytes.BytesReference;
@@ -60,8 +61,9 @@
 /**
  * transport request to simulate a pipeline
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class SimulatePipelineRequest extends ActionRequest implements ToXContentObject {

     private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(SimulatePipelineRequest.class);
diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequestBuilder.java
index 55e6d95fde65c..2a5f281a5075c 100644
--- a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequestBuilder.java
@@ -34,14 +34,16 @@

 import org.opensearch.action.ActionRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.xcontent.MediaType;

 /**
  * Transport request builder to simulate a pipeline
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class SimulatePipelineRequestBuilder extends ActionRequestBuilder<SimulatePipelineRequest, SimulatePipelineResponse> {

     /**
diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineResponse.java b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineResponse.java
index 3cbbc4350c3bd..1fb3a69bd1e16 100644
--- a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineResponse.java
+++ b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineResponse.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.ingest;

 import org.opensearch.OpenSearchException;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -54,8 +55,9 @@
 /**
  * transport response for simulating a pipeline
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class SimulatePipelineResponse extends ActionResponse implements ToXContentObject {
     private String pipelineId;
     private boolean verbose;
diff --git a/server/src/main/java/org/opensearch/action/search/ClearScrollRequest.java b/server/src/main/java/org/opensearch/action/search/ClearScrollRequest.java
index cd9e33634d918..16bf0fc46c4eb 100644
--- a/server/src/main/java/org/opensearch/action/search/ClearScrollRequest.java
+++ b/server/src/main/java/org/opensearch/action/search/ClearScrollRequest.java
@@ -34,6 +34,7 @@

 import org.opensearch.action.ActionRequest;
 import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.xcontent.ToXContentObject;
@@ -50,8 +51,9 @@
 /**
  * Transport request for clearing a search scroll
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClearScrollRequest extends ActionRequest implements ToXContentObject {

     private List<String> scrollIds;
diff --git a/server/src/main/java/org/opensearch/action/search/ClearScrollRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/ClearScrollRequestBuilder.java
index 63f64e02a9dd2..c10417e756dd6 100644
--- a/server/src/main/java/org/opensearch/action/search/ClearScrollRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/search/ClearScrollRequestBuilder.java
@@ -34,14 +34,16 @@

 import org.opensearch.action.ActionRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;

 import java.util.List;

 /**
  * Transport request builder for clearing a search scroll
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClearScrollRequestBuilder extends ActionRequestBuilder<ClearScrollRequest, ClearScrollResponse> {

     public ClearScrollRequestBuilder(OpenSearchClient client, ClearScrollAction action) {
diff --git a/server/src/main/java/org/opensearch/action/search/ClearScrollResponse.java b/server/src/main/java/org/opensearch/action/search/ClearScrollResponse.java
index 4428f693763ed..e2580ccdd0969 100644
--- a/server/src/main/java/org/opensearch/action/search/ClearScrollResponse.java
+++ b/server/src/main/java/org/opensearch/action/search/ClearScrollResponse.java
@@ -32,6 +32,7 @@

 package org.opensearch.action.search;

+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.StatusToXContentObject;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.ActionResponse;
@@ -52,8 +53,9 @@
 /**
  * Transport response for clearing a search scroll
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClearScrollResponse extends ActionResponse implements StatusToXContentObject {

     private static final ParseField SUCCEEDED = new ParseField("succeeded");
diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java b/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java
index a47127f18e18d..840d4becda714 100644
--- a/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java
+++ b/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java
@@ -13,6 +13,7 @@
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -30,7 +31,10 @@

 /**
  * A request to make create point in time against one or more indices.
+ *
+ * @opensearch.api
  */
+@PublicApi(since = "2.3.0")
 public class CreatePitRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContent {

     // keep alive for pit reader context
diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java
index aa841a02f1d20..410b93afc3e65 100644
--- a/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java
+++ b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java
@@ -8,6 +8,7 @@

 package org.opensearch.action.search;

+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.StatusToXContentObject;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.ActionResponse;
@@ -26,7 +27,10 @@

 /**
  * Create point in time response with point in time id and shard success / failures
+ *
+ * @opensearch.api
  */
+@PublicApi(since = "2.3.0")
 public class CreatePitResponse extends ActionResponse implements StatusToXContentObject {
     private static final ParseField ID = new ParseField("pit_id");
     private static final ParseField CREATION_TIME = new ParseField("creation_time");
diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java
index 40098c6670b65..c534b306b1404 100644
--- a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java
+++ b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java
@@ -8,6 +8,7 @@

 package org.opensearch.action.search;

+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -23,7 +24,10 @@
 /**
  * This class captures if deletion of pit is successful along with pit id
+ *
+ * @opensearch.api
  */
+@PublicApi(since = "2.3.0")
 public class DeletePitInfo extends TransportResponse implements Writeable, ToXContent {

     /**
      * This will be true if PIT reader contexts are deleted ond also if contexts are not found.
diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java
index 1a66311cd9a1b..e21a63eef9433 100644
--- a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java
+++ b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java
@@ -11,6 +11,7 @@

 import org.opensearch.action.ActionRequest;
 import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.xcontent.ToXContent;
@@ -27,7 +28,10 @@

 /**
  * Request to delete one or more PIT search contexts based on IDs.
+ *
+ * @opensearch.api
  */
+@PublicApi(since = "2.3.0")
 public class DeletePitRequest extends ActionRequest implements ToXContentObject {

     /**
diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java
index 00274f0e610a9..469d78ad73bd9 100644
--- a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java
+++ b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java
@@ -8,6 +8,7 @@

 package org.opensearch.action.search;

+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.StatusToXContentObject;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.ActionResponse;
@@ -28,7 +29,10 @@

 /**
  * Response class for delete pits flow which clears the point in time search contexts
+ *
+ * @opensearch.api
  */
+@PublicApi(since = "2.3.0")
 public class DeletePitResponse extends ActionResponse implements StatusToXContentObject {

     private final List<DeletePitInfo> deletePitResults;
diff --git a/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java b/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java
index 2c6ab6437fd2a..0cde81203063c 100644
--- a/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java
+++ b/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java
@@ -10,6 +10,7 @@

 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;

@@ -19,8 +20,9 @@
 /**
  * Request to delete a search pipeline
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "2.7.0")
 public class DeleteSearchPipelineRequest extends AcknowledgedRequest<DeleteSearchPipelineRequest> {
     private String id;
diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java
index 9bbc81c0d6a4c..91353240a8156 100644
--- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java
+++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java
@@ -11,6 +11,7 @@

 import org.opensearch.action.support.nodes.BaseNodeResponse;
 import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.xcontent.ToXContentFragment;
@@ -22,7 +23,10 @@

 /**
  * Inner node get all pits response
+ *
+ * @opensearch.api
  */
+@PublicApi(since = "2.3.0")
 public class GetAllPitNodeResponse extends BaseNodeResponse implements ToXContentFragment {

     /**
diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java
index 948fe72eae817..336c8139561e9 100644
--- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java
+++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java
@@ -10,6 +10,7 @@

 import org.opensearch.action.support.nodes.BaseNodesRequest;
 import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.inject.Inject;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -18,7 +19,10 @@

 /**
  * Request to get all active PIT IDs from all nodes of cluster
+ *
+ * @opensearch.api
  */
+@PublicApi(since = "2.3.0")
 public class GetAllPitNodesRequest extends BaseNodesRequest<GetAllPitNodesRequest> {

     @Inject
diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java
index 9bb3ab6407696..8d858a00c409b 100644
--- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java
+++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java
@@ -11,6 +11,7 @@
 import org.opensearch.action.FailedNodeException;
 import org.opensearch.action.support.nodes.BaseNodesResponse;
 import org.opensearch.cluster.ClusterName;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -31,7 +32,10 @@

 /**
  * This class transforms active PIT objects from all nodes to unique PIT objects
+ *
+ * @opensearch.api
  */
+@PublicApi(since = "2.3.0")
 public class GetAllPitNodesResponse extends BaseNodesResponse<GetAllPitNodeResponse> implements ToXContentObject {

     /**
diff --git a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineRequest.java b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineRequest.java
index f573a37fa5dab..59e95aa87985a 100644
--- a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineRequest.java
+++ b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineRequest.java
@@ -10,6 +10,7 @@

 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -20,8 +21,9 @@
 /**
  * Request to get search pipelines
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "2.7.0")
 public class GetSearchPipelineRequest extends ClusterManagerNodeReadRequest<GetSearchPipelineRequest> {
     private final String[] ids;
diff --git a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineResponse.java b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineResponse.java
index 64dde763e9ff8..0379046c8275d 100644
--- a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineResponse.java
+++ b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineResponse.java
@@ -8,6 +8,7 @@

 package org.opensearch.action.search;

+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.StatusToXContentObject;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.Strings;
@@ -32,8 +33,9 @@
 /**
  * transport response for getting a search pipeline
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "2.7.0")
 public class GetSearchPipelineResponse extends ActionResponse implements StatusToXContentObject {

     private final List<PipelineConfiguration> pipelines;
diff --git a/server/src/main/java/org/opensearch/action/search/ListPitInfo.java b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java
index 220b7247517b9..7e4ed186dd665 100644
--- a/server/src/main/java/org/opensearch/action/search/ListPitInfo.java
+++ b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java
@@ -8,6 +8,7 @@

 package org.opensearch.action.search;

+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -23,7 +24,10 @@

 /**
  * This holds information about pit reader context such as pit id and creation time
+ *
+ * @opensearch.api
  */
+@PublicApi(since = "2.3.0")
 public class ListPitInfo implements ToXContentFragment, Writeable {
     private final String pitId;
     private final long creationTime;
diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java
index 00e0345062d1c..5f46e0c298de4 100644
--- a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java
+++ b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java
@@ -38,6 +38,7 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.IndicesOptions.WildcardStates;
 import org.opensearch.common.CheckedBiConsumer;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.xcontent.LoggingDeprecationHandler;
 import org.opensearch.core.common.bytes.BytesReference;
@@ -71,8 +72,9 @@
 /**
  * A multi search API request.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class MultiSearchRequest extends ActionRequest implements CompositeIndicesRequest {

     public static final int MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT = 0;
diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/MultiSearchRequestBuilder.java
index f9bb90c69d925..3bf078e8ce793 100644
--- a/server/src/main/java/org/opensearch/action/search/MultiSearchRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/search/MultiSearchRequestBuilder.java
@@ -35,12 +35,14 @@
 import org.opensearch.action.ActionRequestBuilder;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;

 /**
  * A request builder for multiple search requests.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class MultiSearchRequestBuilder extends ActionRequestBuilder<MultiSearchRequest, MultiSearchResponse> {

     public MultiSearchRequestBuilder(OpenSearchClient client, MultiSearchAction action) {
diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java
index 24ce4c2406570..70bb0b99e69df 100644
--- a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java
+++ b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java
@@ -35,6 +35,7 @@
 import org.opensearch.ExceptionsHelper;
 import org.opensearch.OpenSearchException;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.ActionResponse;
@@ -59,8 +60,9 @@
 /**
  * A multi search response.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class MultiSearchResponse extends ActionResponse implements Iterable<MultiSearchResponse.Item>, ToXContentObject {

     private static final ParseField RESPONSES = new ParseField(Fields.RESPONSES);
@@ -78,8 +80,9 @@ public class MultiSearchResponse extends ActionResponse implements Iterable
 public class PutSearchPipelineRequest extends AcknowledgedRequest<PutSearchPipelineRequest> implements ToXContentObject {
     private String id;
     private BytesReference source;
diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java
index 4c0fe3ac06326..8cf92934c8a52 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java
@@ -8,10 +8,14 @@

 package org.opensearch.action.search;

+import org.opensearch.common.annotation.PublicApi;
+
 /**
  * Enum for different Search Phases in OpenSearch
- * @opensearch.internal
+ *
+ * @opensearch.api
  */
+@PublicApi(since = "2.9.0")
 public enum SearchPhaseName {
     DFS_PRE_QUERY("dfs_pre_query"),
     QUERY("query"),
diff --git a/server/src/main/java/org/opensearch/action/search/SearchProgressListener.java b/server/src/main/java/org/opensearch/action/search/SearchProgressListener.java
index ffc64682cb07d..34e8aacbad250 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchProgressListener.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchProgressListener.java
@@ -38,6 +38,7 @@
 import org.apache.lucene.search.TotalHits;
 import org.opensearch.action.search.SearchResponse.Clusters;
 import org.opensearch.cluster.routing.GroupShardsIterator;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.search.SearchPhaseResult;
 import org.opensearch.search.SearchShardTarget;
 import org.opensearch.search.aggregations.InternalAggregations;
@@ -53,6 +54,7 @@
  *
  * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public abstract class SearchProgressListener {

     private static final Logger logger = LogManager.getLogger(SearchProgressListener.class);
diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java
index fb026dae630b7..96cea17ff4972 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java
@@ -38,6 +38,7 @@
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.common.Nullable;
org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -69,8 +70,9 @@ * @see org.opensearch.client.Client#search(SearchRequest) * @see SearchResponse * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchRequest extends ActionRequest implements IndicesRequest.Replaceable { public static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java index bc43b65e5d844..e949c5e0bea29 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.query.QueryBuilder; import org.opensearch.script.Script; @@ -58,8 +59,9 @@ /** * A search action request builder. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchRequestBuilder extends ActionRequestBuilder { public SearchRequestBuilder(OpenSearchClient client, SearchAction action) { diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java index ad299c11b987d..6b7c94ec3037a 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -8,6 +8,7 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.metrics.MeanMetric; @@ -19,8 +20,9 @@ /** * Request level search stats to track coordinator level node search latencies * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.11.0") public final class SearchRequestStats implements SearchRequestOperationsListener { Map phaseStatsMap = new EnumMap<>(SearchPhaseName.class); diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponse.java b/server/src/main/java/org/opensearch/action/search/SearchResponse.java index 96d07982d03db..899c71e91e3ab 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponse.java @@ -35,6 +35,7 @@ import org.apache.lucene.search.TotalHits; import org.opensearch.Version; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.ParseField; @@ -77,8 +78,9 @@ /** * A response of a search request. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchResponse extends ActionResponse implements StatusToXContentObject { private static final ParseField SCROLL_ID = new ParseField("_scroll_id"); @@ -573,8 +575,9 @@ public String toString() { * Holds info about the clusters that the search was executed on: how many in total, how many of them were successful * and how many of them were skipped. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Clusters implements ToXContentFragment, Writeable { public static final Clusters EMPTY = new Clusters(0, 0, 0); @@ -677,8 +680,9 @@ public String toString() { * Holds info about the clusters that the search was executed on: how many in total, how many of them were successful * and how many of them were skipped. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class PhaseTook implements ToXContentFragment, Writeable { static final ParseField PHASE_TOOK = new ParseField("phase_took"); private final Map phaseTookMap; diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java index 579a01f0dd932..bca2c8a52b691 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentFragment; @@ -58,8 +59,9 @@ * to parse aggregations into, which are not serializable. This is the common part that can be * shared between core and client. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchResponseSections implements ToXContentFragment { public static final ParseField EXT_FIELD = new ParseField("ext"); diff --git a/server/src/main/java/org/opensearch/action/search/SearchScrollRequest.java b/server/src/main/java/org/opensearch/action/search/SearchScrollRequest.java index cda00811e5500..044efdc36d04f 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchScrollRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchScrollRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -53,8 +54,9 @@ /** * Transport request for a search scroll * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchScrollRequest extends ActionRequest implements ToXContentObject { private String scrollId; diff --git a/server/src/main/java/org/opensearch/action/search/SearchScrollRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/SearchScrollRequestBuilder.java index 638c595216631..41b34bd9c6c9e 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchScrollRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/SearchScrollRequestBuilder.java @@ -34,14 +34,16 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.search.Scroll; /** * A search scroll action request builder. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchScrollRequestBuilder extends ActionRequestBuilder { public SearchScrollRequestBuilder(OpenSearchClient client, SearchScrollAction action) { diff --git a/server/src/main/java/org/opensearch/action/search/SearchTask.java b/server/src/main/java/org/opensearch/action/search/SearchTask.java index 20370b7b17a07..d3c1043c50cce 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTask.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.tasks.TaskId; import org.opensearch.tasks.CancellableTask; @@ -45,8 +46,9 @@ /** * Task storing information about a currently running {@link SearchRequest}. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchTask extends CancellableTask implements SearchBackpressureTask { // generating description in a lazy way since source can be quite big private final Supplier descriptionSupplier; diff --git a/server/src/main/java/org/opensearch/action/search/SearchType.java b/server/src/main/java/org/opensearch/action/search/SearchType.java index cb86c0d6c1b4a..e549ec598380a 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchType.java +++ b/server/src/main/java/org/opensearch/action/search/SearchType.java @@ -32,11 +32,14 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; + /** * Search type represent the manner at which the search operation is executed. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum SearchType { /** * Same as {@link #QUERY_THEN_FETCH}, except for an initial scatter phase which goes and computes the distributed diff --git a/server/src/main/java/org/opensearch/action/support/IndicesOptions.java b/server/src/main/java/org/opensearch/action/support/IndicesOptions.java index d30ee7e11bdfa..2d9fecddb6f7d 100644 --- a/server/src/main/java/org/opensearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/opensearch/action/support/IndicesOptions.java @@ -32,6 +32,7 @@ package org.opensearch.action.support; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -55,15 +56,17 @@ * Controls how to deal with unavailable concrete indices (closed or missing), how wildcard expressions are expanded * to actual indices (all, closed or open indices) and how to deal with wildcard expressions that resolve to no indices. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesOptions implements ToXContentFragment { /** * The wildcard states. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum WildcardStates { OPEN, CLOSED, diff --git a/server/src/main/java/org/opensearch/action/support/WriteRequest.java b/server/src/main/java/org/opensearch/action/support/WriteRequest.java index f462464b99ce8..8d53f7b005d54 100644 --- a/server/src/main/java/org/opensearch/action/support/WriteRequest.java +++ b/server/src/main/java/org/opensearch/action/support/WriteRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.replication.ReplicatedWriteRequest; import org.opensearch.action.update.UpdateRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -78,8 +79,9 @@ default R setRefreshPolicy(String refreshPolicy) { /** * The refresh policy of the request. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") enum RefreshPolicy implements Writeable { /** * Don't refresh after this request. The default. 
diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java index 5d09d880336f1..279ad401f7e56 100644 --- a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java +++ b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java @@ -31,6 +31,7 @@ package org.opensearch.action.support.master; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -49,8 +50,9 @@ /** * A response that indicates that a request has been acknowledged * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AcknowledgedResponse extends ActionResponse implements ToXContentObject { private static final ParseField ACKNOWLEDGED = new ParseField("acknowledged"); diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java index 80ca1629417ad..8dbf3dd4df512 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.termvectors; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -41,8 +42,9 @@ /** * A single multi term response. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiTermVectorsItemResponse implements Writeable { private final TermVectorsResponse response; diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java index c055564c3fcbe..0eef737a54bb3 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java @@ -39,6 +39,7 @@ import org.opensearch.action.RealtimeRequest; import org.opensearch.action.ValidateActions; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentParser; @@ -54,8 +55,9 @@ /** * A single multi get request. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiTermVectorsRequest extends ActionRequest implements Iterable, diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java index 04dfd39112d6e..6bfa402575885 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * A multi term vectors request builder. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiTermVectorsRequestBuilder extends ActionRequestBuilder { public MultiTermVectorsRequestBuilder(OpenSearchClient client, MultiTermVectorsAction action) { diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java index 5d40e64df1e3e..8a059829dda0e 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java @@ -34,6 +34,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -48,15 +49,17 @@ /** * A multi term vectors response. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiTermVectorsResponse extends ActionResponse implements Iterable, ToXContentObject { /** * Represents a failure. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure implements Writeable { private final String index; private final String id; diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFilter.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFilter.java index 0e0202777794b..89d2f8567b3cb 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFilter.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFilter.java @@ -39,6 +39,7 @@ import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.TFIDFSimilarity; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; import java.util.HashMap; @@ -49,8 +50,9 @@ * Filter the term vector (doc frequency, positions, offsets) for a * document.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TermVectorsFilter { public static final int DEFAULT_MAX_QUERY_TERMS = 25; public static final int DEFAULT_MIN_TERM_FREQ = 0; @@ -179,8 +181,9 @@ public void setMaxWordLength(int maxWordLength) { /** * Score term * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class ScoreTerm { public String field; public String word; diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java index 825b0b4982880..a761cabb9599a 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java @@ -40,6 +40,7 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.support.single.shard.SingleShardRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentType; @@ -74,8 +75,9 @@ * Note, the {@link #index()} and {@link #id(String)} are * required. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TermVectorsRequest extends SingleShardRequest implements RealtimeRequest { private static final ParseField INDEX = new ParseField("_index"); private static final ParseField ID = new ParseField("_id"); @@ -118,8 +120,9 @@ public class TermVectorsRequest extends SingleShardRequest i /** * Filter settings * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class FilterSettings { public Integer maxNumTerms; public Integer minTermFreq; @@ -572,8 +575,9 @@ public void writeTo(StreamOutput out) throws IOException { /** * The flags. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Flag { // Do not change the order of these flags we use // the ordinal for encoding! Only append to the end! diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java index 02cfff1a6682b..ce68c06206fef 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.VersionType; @@ -46,8 +47,9 @@ * Note, the {@code index}, {@code type} and {@code id} are * required.
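As context for the term vectors hunks above, a minimal sketch of building the request whose index() and id() are required; the concrete values are placeholders, and the boolean switches correspond to the Flag enum whose ordinal is the wire encoding:

    import org.opensearch.action.termvectors.TermVectorsRequest;

    TermVectorsRequest request = new TermVectorsRequest("my-index", "1");
    // Request positions and offsets for each term, plus per-field statistics.
    request.positions(true).offsets(true).fieldStatistics(true);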
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TermVectorsRequestBuilder extends ActionRequestBuilder { public TermVectorsRequestBuilder(OpenSearchClient client, TermVectorsAction action) { diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java index 3c338ce7338bb..7ad27808588ae 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java @@ -41,6 +41,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRefBuilder; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionResponse; @@ -62,8 +63,9 @@ * Response returning the term vector (doc frequency, positions, offsets) for a * document. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TermVectorsResponse extends ActionResponse implements ToXContentObject { /** diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java index 946c3c2446173..9654bd1c114ba 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java @@ -42,6 +42,7 @@ import org.opensearch.action.support.replication.ReplicationRequest; import org.opensearch.action.support.single.instance.InstanceShardOperationRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.XContentHelper; @@ -74,8 +75,9 @@ /** * Transport request for updating an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpdateRequest extends InstanceShardOperationRequest implements DocWriteRequest, diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java b/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java index c97d0b4f5d13d..d662381ac5e19 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java @@ -39,6 +39,7 @@ import org.opensearch.action.support.single.instance.InstanceShardOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.VersionType; @@ -49,8 +50,9 @@ /** * Transport request builder for updating an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder implements WriteRequestBuilder { diff --git a/server/src/main/java/org/opensearch/action/update/UpdateResponse.java b/server/src/main/java/org/opensearch/action/update/UpdateResponse.java index d9f6ceacb0f3b..c7ee1742af0f2 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateResponse.java 
+++ b/server/src/main/java/org/opensearch/action/update/UpdateResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.update; import org.opensearch.action.DocWriteResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.index.shard.ShardId; @@ -48,8 +49,9 @@ /** * Transport response for updating an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpdateResponse extends DocWriteResponse { private static final String GET = "get"; @@ -173,8 +175,9 @@ public static void parseXContentFields(XContentParser parser, Builder context) t * temporarily store the parsed values, then the {@link DocWriteResponse.Builder#build()} method is called to * instantiate the {@link UpdateResponse}. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder extends DocWriteResponse.Builder { private GetResult getResult = null; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java index fab104142e5bb..904083e96032f 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.Index; import org.opensearch.gateway.GatewayService; @@ -52,8 +53,9 @@ /** * An event received by the local node, signaling that the cluster state has changed. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterChangedEvent { private final String source; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java index 5bd858a53bee2..4c38d6fd99f5d 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -56,8 +57,9 @@ * InternalClusterInfoService.shardIdentifierFromRouting(String) * for the key used in the shardSizes map * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterInfo implements ToXContentFragment, Writeable { private final Map leastAvailableSpaceUsage; private final Map mostAvailableSpaceUsage; @@ -287,8 +289,9 @@ public void writeTo(StreamOutput out) throws IOException { /** * Represents the total amount of "reserved" space on a particular data path, together with the set of shards considered. 
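As context for the update API hunks above, a minimal sketch of an UpdateRequest; the index, id, and partial document are placeholders:

    import org.opensearch.action.update.UpdateRequest;
    import java.util.Map;

    UpdateRequest update = new UpdateRequest("my-index", "1")
        .doc(Map.of("status", "active"))  // partial document merged into the existing source
        .docAsUpsert(true);               // index the doc itself if the target does not exist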
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ReservedSpace implements Writeable { public static final ReservedSpace EMPTY = new ReservedSpace(0, new HashSet<>()); diff --git a/server/src/main/java/org/opensearch/cluster/ClusterName.java b/server/src/main/java/org/opensearch/cluster/ClusterName.java index 44321f7de9395..d6149421c3fd0 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterName.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterName.java @@ -32,6 +32,7 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; @@ -45,8 +46,9 @@ /** * Cluster Name * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterName implements Writeable { public static final Setting CLUSTER_NAME_SETTING = new Setting<>("cluster.name", "opensearch", (s) -> { diff --git a/server/src/main/java/org/opensearch/cluster/Diff.java b/server/src/main/java/org/opensearch/cluster/Diff.java index c0e8e7038d9b4..77301b7c04b66 100644 --- a/server/src/main/java/org/opensearch/cluster/Diff.java +++ b/server/src/main/java/org/opensearch/cluster/Diff.java @@ -32,13 +32,15 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.Writeable; /** * Represents difference between states of cluster state parts * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Diff extends Writeable { /** diff --git a/server/src/main/java/org/opensearch/cluster/DiskUsage.java b/server/src/main/java/org/opensearch/cluster/DiskUsage.java index c472522baee51..33ed030c58a02 100644 --- a/server/src/main/java/org/opensearch/cluster/DiskUsage.java +++ b/server/src/main/java/org/opensearch/cluster/DiskUsage.java @@ -32,6 +32,7 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ /** * Encapsulation class used to represent the amount of disk used on a node. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DiskUsage implements ToXContentFragment, Writeable { final String nodeId; final String nodeName; diff --git a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributeValueHealth.java b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributeValueHealth.java index eae79e2ac0986..b4d797a39dd2f 100644 --- a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributeValueHealth.java +++ b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributeValueHealth.java @@ -14,6 +14,7 @@ import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -31,7 +32,10 @@ /** * Cluster Awareness AttributeValue Health information + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAwarenessAttributeValueHealth implements Writeable, ToXContentFragment { private static final String ACTIVE_SHARDS = "active_shards"; diff --git a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealth.java b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealth.java index 01d9cfc4438e3..3de2260d0e8bd 100644 --- a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealth.java +++ b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealth.java @@ -11,6 +11,7 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -34,7 +35,9 @@ /** * Cluster Awareness health information * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAwarenessAttributesHealth implements Iterable, Writeable, ToXContentFragment { private final String awarenessAttributeName; diff --git a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealth.java b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealth.java index 1643db5a1f460..841764110626d 100644 --- a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealth.java +++ b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealth.java @@ -11,6 +11,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; @@ -30,7 +31,10 @@ /** * Cluster state Awareness health information + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAwarenessHealth implements Writeable, ToXContentFragment, Iterable { private static final String AWARENESS_ATTRIBUTE = 
"awareness_attributes"; diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java index 40a5080ba74e7..5fa897c0b1185 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.block; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -48,8 +49,9 @@ /** * Blocks the cluster for concurrency * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterBlock implements Writeable, ToXContentFragment { private final int id; diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java index 5ec847e100c86..5d3bf94aedb19 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java @@ -32,13 +32,16 @@ package org.opensearch.cluster.block; +import org.opensearch.common.annotation.PublicApi; + import java.util.EnumSet; /** * What level to block the cluster * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ClusterBlockLevel { READ, WRITE, diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java index 3f67870781580..914743299b023 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.decommission; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -18,8 +19,9 @@ /** * {@link DecommissionAttribute} encapsulates information about decommissioned node attribute like attribute name, attribute value. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public final class DecommissionAttribute implements Writeable { private final String attributeName; private final String attributeValue; diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java index 4ca8c3cc4286e..f4f6cbf632ae2 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java @@ -8,9 +8,14 @@ package org.opensearch.cluster.decommission; +import org.opensearch.common.annotation.PublicApi; + /** * An enumeration of the states during decommissioning + * + * @opensearch.api */ +@PublicApi(since = "2.4.0") public enum DecommissionStatus { /** * Decommission process is initiated, and to-be-decommissioned leader is excluded from voting config diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java b/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java index 5ea482b8b8ffa..06f6b3d57385d 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.health; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -41,8 +42,9 @@ /** * Cluster health status * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ClusterHealthStatus implements Writeable { GREEN((byte) 0), YELLOW((byte) 1), diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java index 0bb762e3ff744..19c64965e6941 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -61,8 +62,9 @@ /** * Cluster Index Health Information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ClusterIndexHealth implements Iterable, Writeable, ToXContentFragment { private static final String STATUS = "status"; private static final String NUMBER_OF_SHARDS = "number_of_shards"; diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java index cbc63565228f9..1fe88f65248c2 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.UnassignedInfo.AllocationStatus; +import 
org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -58,8 +59,9 @@ /** * Cluster shard health information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ClusterShardHealth implements Writeable, ToXContentFragment { private static final String STATUS = "status"; private static final String ACTIVE_SHARDS = "active_shards"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java index 31e37078d8b6c..8b3cc3c3cc2cc 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentFactory; @@ -61,8 +62,9 @@ /** * Metadata for index aliases * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AliasMetadata extends AbstractDiffable implements ToXContentFragment { private final String alias; @@ -271,8 +273,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Builder of alias metadata. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final String alias; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java index 27803cb106005..8bb25ba8e8472 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java @@ -10,6 +10,7 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.action.admin.cluster.crypto.CryptoSettings; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -24,8 +25,9 @@ /** * Metadata about encryption and decryption * - * @opensearch.internal + * @opensearch.experimental */ +@ExperimentalApi public class CryptoMetadata implements Writeable { static final public String CRYPTO_METADATA_KEY = "crypto_metadata"; static final public String KEY_PROVIDER_NAME_KEY = "key_provider_name"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java b/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java index 753f872f88b22..54df245b1b835 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java @@ -37,6 +37,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -59,8 +60,9 @@ /** * Primary DataStream class * - * @opensearch.internal + * 
@opensearch.api */ +@PublicApi(since = "1.0.0") public final class DataStream extends AbstractDiffable implements ToXContentObject { public static final String BACKING_INDEX_PREFIX = ".ds-"; @@ -258,8 +260,9 @@ public int hashCode() { /** * A timestamp field. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class TimestampField implements Writeable, ToXContentObject { static ParseField NAME_FIELD = new ParseField("name"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 5d8b751b241e2..2e1421c278879 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -44,6 +44,7 @@ import org.opensearch.cluster.node.DiscoveryNodeFilters; import org.opensearch.cluster.routing.allocation.IndexMetadataUpdater; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Setting; @@ -450,8 +451,9 @@ public Iterator> settings() { /** * Blocks the API. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum APIBlock implements Writeable { READ_ONLY("read_only", INDEX_READ_ONLY_BLOCK), READ("read", INDEX_READ_BLOCK), diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java index c5efb55316b84..3d532208bcfe2 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java @@ -273,8 +273,9 @@ public String toString() { /** * Builder of index template metadata. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private static final Set VALID_FIELDS = Sets.newHashSet( diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java index 2081a0035303b..e8180613c0fa3 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java @@ -35,6 +35,7 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; @@ -56,8 +57,9 @@ /** * Mapping configuration for a type. 
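As context for the APIBlock enum above, each block pairs a setting name with a ClusterBlock; a sketch of the index setting behind the read_only block (the setting key is the one shown in the enum, the surrounding code is illustrative):

    import org.opensearch.common.settings.Settings;

    // Setting index.blocks.read_only to true applies the READ_ONLY block to an index.
    Settings blockSettings = Settings.builder().put("index.blocks.read_only", true).build();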
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MappingMetadata extends AbstractDiffable { public static final MappingMetadata EMPTY_MAPPINGS = new MappingMetadata(MapperService.SINGLE_MAPPING_NAME, Collections.emptyMap()); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java index 580643b96e411..b9d2a3edf356f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.metadata; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ /** * Metadata about registered repository * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RepositoryMetadata implements Writeable { private final String name; diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java index 561f1b727efe0..2ebcd8096893d 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.Diff; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.regex.Regex; import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.Strings; @@ -62,8 +63,9 @@ * This class holds all {@link DiscoveryNode} in the cluster and provides convenience methods to * access, modify merge / diff discovery nodes. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DiscoveryNodes extends AbstractDiffable implements Iterable { public static final DiscoveryNodes EMPTY_NODES = builder().build(); @@ -567,8 +569,9 @@ public String toString() { /** * Delta between nodes. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Delta { private final String localNodeId; @@ -735,8 +738,9 @@ public static Builder builder(DiscoveryNodes nodes) { /** * Builder of a map of discovery nodes. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final Map nodes; diff --git a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java index b01e074ce40c2..3dcf4ae3c9b99 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -56,8 +57,9 @@ * - {@link SnapshotRecoverySource} recovery from a snapshot * - {@link LocalShardsRecoverySource} recovery from other shards of another index on the same node * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class RecoverySource implements Writeable, ToXContentObject { @Override @@ -111,8 +113,9 @@ protected void writeAdditionalFields(StreamOutput out) throws IOException { /** * Type of recovery. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Type { EMPTY_STORE, EXISTING_STORE, diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardRoutingState.java b/server/src/main/java/org/opensearch/cluster/routing/ShardRoutingState.java index a46464d8727ee..2086159790ba9 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/ShardRoutingState.java +++ b/server/src/main/java/org/opensearch/cluster/routing/ShardRoutingState.java @@ -32,12 +32,15 @@ package org.opensearch.cluster.routing; +import org.opensearch.common.annotation.PublicApi; + /** * Represents the current state of a {@link ShardRouting} as defined by the * cluster. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ShardRoutingState { /** * The shard is not assigned to any node. diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java index 5e748df5eed2d..cf6dc9cd7306e 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; @@ -62,8 +63,9 @@ /** * Holds additional information as to why the shard is in unassigned state. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class UnassignedInfo implements ToXContentFragment, Writeable { public static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("date_optional_time").withZone(ZoneOffset.UTC); @@ -81,8 +83,9 @@ public final class UnassignedInfo implements ToXContentFragment, Writeable { * Note, ordering of the enum is important, make sure to add new values * at the end and handle version serialization properly. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Reason { /** * Unassigned as a result of an API creation of an index. @@ -157,8 +160,9 @@ public enum Reason { * Note, ordering of the enum is important, make sure to add new values * at the end and handle version serialization properly. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum AllocationStatus implements Writeable { /** * The shard was denied allocation to a node because the allocation deciders all returned a NO decision diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java index 01471ab664294..2b93a1483b801 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.routing; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -20,8 +21,9 @@ /** * Entity for Weighted Round Robin weights * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class WeightedRouting implements Writeable { private String attributeName; private Map weights; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/RerouteExplanation.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/RerouteExplanation.java index 6c84957d6a788..c07fb0135262b 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/RerouteExplanation.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/RerouteExplanation.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.routing.allocation.command.AllocationCommand; import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; @@ -45,8 +46,9 @@ * Class encapsulating the explanation for a single {@link AllocationCommand} * taken from the Deciders * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RerouteExplanation implements ToXContentObject { private AllocationCommand command; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingExplanations.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingExplanations.java index 769212703b48b..490eb76ab8563 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingExplanations.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingExplanations.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.routing.allocation; import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentFragment; @@ -48,8 +49,9 @@ * Class used to encapsulate a number of {@link RerouteExplanation} * explanations. 
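As context for the WeightedRouting promotion above, a minimal sketch of a weighted round-robin value, assuming the (attributeName, weights) constructor; the attribute name and zone weights are placeholders:

    import org.opensearch.cluster.routing.WeightedRouting;
    import java.util.Map;

    // zone-a and zone-b share traffic equally; zone-c receives none.
    WeightedRouting weights = new WeightedRouting("zone", Map.of("zone-a", 1.0, "zone-b", 1.0, "zone-c", 0.0));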
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RoutingExplanations implements ToXContentFragment { private final List explanations; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/Decision.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/Decision.java index 807ab070b82b1..ac5a18c3fcb21 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/Decision.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/Decision.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.routing.allocation.decider; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -54,8 +55,9 @@ * * @see AllocationDecider * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class Decision implements ToXContent, Writeable { public static final Decision ALWAYS = new Single(Type.YES); @@ -98,8 +100,9 @@ public static Decision readFrom(StreamInput in) throws IOException { * This enumeration defines the * possible types of decisions * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Type implements Writeable { YES(1), THROTTLE(2), diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java index afc4e36ec352e..827f3a12fbce4 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.Version; import org.opensearch.cluster.ClusterStateTaskExecutor; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -133,7 +134,10 @@ protected ThrottlingKey registerClusterManagerTask(String taskKey, boolean throt /** * Class to store the throttling key for the tasks of cluster manager + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ThrottlingKey { private String taskThrottlingKey; private boolean throttlingEnabled; diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index aa7766979e851..e097803d86b48 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -45,7 +45,6 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.OperationRouting; import org.opensearch.cluster.routing.RerouteService; -import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; @@ -61,9 +60,8 @@ /** * Main Cluster Service * - * @opensearch.api + * @opensearch.internal */ -@PublicApi(since = "1.0.0") public class ClusterService extends AbstractLifecycleComponent { private final ClusterManagerService 
clusterManagerService; diff --git a/server/src/main/java/org/opensearch/cluster/service/PendingClusterTask.java b/server/src/main/java/org/opensearch/cluster/service/PendingClusterTask.java index b1723cca518fd..b06c537e7bac5 100644 --- a/server/src/main/java/org/opensearch/cluster/service/PendingClusterTask.java +++ b/server/src/main/java/org/opensearch/cluster/service/PendingClusterTask.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.service; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ /** * Represents a task that is pending in the cluster * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PendingClusterTask implements Writeable { private long insertOrder; diff --git a/server/src/main/java/org/opensearch/common/FieldMemoryStats.java b/server/src/main/java/org/opensearch/common/FieldMemoryStats.java index 86a2fe1397cec..2e04f39e48c4c 100644 --- a/server/src/main/java/org/opensearch/common/FieldMemoryStats.java +++ b/server/src/main/java/org/opensearch/common/FieldMemoryStats.java @@ -32,6 +32,7 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -47,8 +48,9 @@ /** * A reusable class to encode {@code field -> memory size} mappings * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class FieldMemoryStats implements Writeable, Iterable> { private final Map stats; diff --git a/server/src/main/java/org/opensearch/common/Priority.java b/server/src/main/java/org/opensearch/common/Priority.java index 09a751362c945..4f03d6e363550 100644 --- a/server/src/main/java/org/opensearch/common/Priority.java +++ b/server/src/main/java/org/opensearch/common/Priority.java @@ -32,6 +32,7 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -40,8 +41,9 @@ /** * Priority levels. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum Priority { IMMEDIATE((byte) 0), diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java index 4bdce11ff4f9a..1264551401b4c 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java @@ -17,6 +17,8 @@ /** * ReadContext is used to encapsulate all data needed by BlobContainer#readBlobAsync + * + * @opensearch.experimental */ @ExperimentalApi public class ReadContext { @@ -55,8 +57,11 @@ public List getPartStreams() { /** * Functional interface defining an instance that can create an async action * to create a part of an object represented as an InputStreamContainer. + * + * @opensearch.experimental */ @FunctionalInterface + @ExperimentalApi public interface StreamPartCreator extends Supplier> { /** * Kicks off an async process to start streaming.
diff --git a/server/src/main/java/org/opensearch/common/document/DocumentField.java b/server/src/main/java/org/opensearch/common/document/DocumentField.java index 8ce672d4fb3fc..5cdc2bba8be16 100644 --- a/server/src/main/java/org/opensearch/common/document/DocumentField.java +++ b/server/src/main/java/org/opensearch/common/document/DocumentField.java @@ -32,6 +32,7 @@ package org.opensearch.common.document; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -56,8 +57,9 @@ * @see SearchHit * @see GetResult * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DocumentField implements Writeable, ToXContentFragment, Iterable { private final String name; diff --git a/server/src/main/java/org/opensearch/common/geo/ShapeRelation.java b/server/src/main/java/org/opensearch/common/geo/ShapeRelation.java index 0a5a66ef54c9c..d5b761a531e7f 100644 --- a/server/src/main/java/org/opensearch/common/geo/ShapeRelation.java +++ b/server/src/main/java/org/opensearch/common/geo/ShapeRelation.java @@ -33,6 +33,7 @@ package org.opensearch.common.geo; import org.apache.lucene.document.ShapeField.QueryRelation; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ * Enum representing the relationship between a Query / Filter Shape and indexed Shapes * that will be used to determine if a Document should be matched or not * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ShapeRelation implements Writeable { INTERSECTS("intersects"), diff --git a/server/src/main/java/org/opensearch/common/inject/Module.java b/server/src/main/java/org/opensearch/common/inject/Module.java index e66044ff26c40..b1fc031192ea0 100644 --- a/server/src/main/java/org/opensearch/common/inject/Module.java +++ b/server/src/main/java/org/opensearch/common/inject/Module.java @@ -29,8 +29,6 @@ package org.opensearch.common.inject; -import org.opensearch.common.annotation.PublicApi; - /** * A module contributes configuration information, typically interface * bindings, which will be used to create an {@link Injector}. A Guice-based * @@ -45,9 +43,8 @@ * Use scope and binding annotations on these methods to configure the * bindings. * - * @opensearch.api + * @opensearch.internal */ -@PublicApi(since = "1.0.0") public interface Module { /** diff --git a/server/src/main/java/org/opensearch/common/io/stream/BytesStreamOutput.java b/server/src/main/java/org/opensearch/common/io/stream/BytesStreamOutput.java index ad09efde72009..bfed1f0883672 100644 --- a/server/src/main/java/org/opensearch/common/io/stream/BytesStreamOutput.java +++ b/server/src/main/java/org/opensearch/common/io/stream/BytesStreamOutput.java @@ -35,6 +35,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.core.common.bytes.BytesArray; @@ -49,8 +50,9 @@ * A {@link StreamOutput} that uses {@link BigArrays} to acquire pages of * bytes, which avoids frequent reallocation & copying of the internal data.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BytesStreamOutput extends BytesStream { protected final BigArrays bigArrays; diff --git a/server/src/main/java/org/opensearch/common/logging/DeprecationLogger.java b/server/src/main/java/org/opensearch/common/logging/DeprecationLogger.java index 98b4ebbc330da..d4dbb953ffe12 100644 --- a/server/src/main/java/org/opensearch/common/logging/DeprecationLogger.java +++ b/server/src/main/java/org/opensearch/common/logging/DeprecationLogger.java @@ -35,6 +35,7 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.PublicApi; /** * A logger that logs deprecation notices. Logger should be initialized with a parent logger whose name will be used * @@ -50,8 +51,9 @@ * key is combined with the X-Opaque-Id request header value, if supplied, which allows for per-client * message limiting. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeprecationLogger { /** @@ -108,8 +110,9 @@ public DeprecationLoggerBuilder deprecate(final String key, final String msg, fi /** * The builder for the deprecation logger * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public class DeprecationLoggerBuilder { public DeprecationLoggerBuilder withDeprecation(String key, String msg, Object[] params) { diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 62e8faf33e1fa..b34a2aaffe408 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.Loggers; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.util.FeatureFlags; @@ -72,8 +73,9 @@ * Encapsulates all valid index level settings. * @see Property#IndexScope * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class IndexScopedSettings extends AbstractScopedSettings { public static final Predicate INDEX_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(IndexMetadata.INDEX_SETTING_PREFIX); diff --git a/server/src/main/java/org/opensearch/common/unit/Fuzziness.java b/server/src/main/java/org/opensearch/common/unit/Fuzziness.java index 2ce86cbfd13f3..d16ec9723fb2b 100644 --- a/server/src/main/java/org/opensearch/common/unit/Fuzziness.java +++ b/server/src/main/java/org/opensearch/common/unit/Fuzziness.java @@ -32,6 +32,7 @@ package org.opensearch.common.unit; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -49,8 +50,9 @@ * parsing and conversion from similarities to edit distances * etc.
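As context for the Fuzziness hunk above, a minimal sketch of the two usual ways to obtain a value, assuming the long-standing AUTO constant and fromEdits factory:

    import org.opensearch.common.unit.Fuzziness;

    Fuzziness auto = Fuzziness.AUTO;             // allowed edit distance scales with term length
    Fuzziness oneEdit = Fuzziness.fromEdits(1);  // fixed Levenshtein distance of 1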
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Fuzziness implements ToXContentFragment, Writeable { public static final String X_FIELD_NAME = "fuzziness"; diff --git a/server/src/main/java/org/opensearch/common/unit/SizeValue.java b/server/src/main/java/org/opensearch/common/unit/SizeValue.java index 766199ebbc8f8..14f2bedde53f8 100644 --- a/server/src/main/java/org/opensearch/common/unit/SizeValue.java +++ b/server/src/main/java/org/opensearch/common/unit/SizeValue.java @@ -33,6 +33,7 @@ package org.opensearch.common.unit; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -43,8 +44,9 @@ /** * Conversion values. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SizeValue implements Writeable, Comparable { private final long size; diff --git a/server/src/main/java/org/opensearch/http/HttpChannel.java b/server/src/main/java/org/opensearch/http/HttpChannel.java index 679a5d73c7837..ed20ec89a9099 100644 --- a/server/src/main/java/org/opensearch/http/HttpChannel.java +++ b/server/src/main/java/org/opensearch/http/HttpChannel.java @@ -32,6 +32,7 @@ package org.opensearch.http; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.network.CloseableChannel; import org.opensearch.core.action.ActionListener; @@ -41,8 +42,9 @@ /** * Represents an HTTP comms channel * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface HttpChannel extends CloseableChannel { /** * Notify HTTP channel that exception happens and the response may not be sent (for example, timeout) diff --git a/server/src/main/java/org/opensearch/http/HttpRequest.java b/server/src/main/java/org/opensearch/http/HttpRequest.java index 162ef11a3d865..3dc10777b657a 100644 --- a/server/src/main/java/org/opensearch/http/HttpRequest.java +++ b/server/src/main/java/org/opensearch/http/HttpRequest.java @@ -33,6 +33,7 @@ package org.opensearch.http; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.RestRequest; @@ -45,15 +46,17 @@ * A basic http request abstraction. Http modules need to implement this interface to integrate with the * server package's rest handling. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface HttpRequest { /** * Which HTTP version is being used * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") enum HttpVersion { HTTP_1_0, HTTP_1_1 diff --git a/server/src/main/java/org/opensearch/http/HttpResponse.java b/server/src/main/java/org/opensearch/http/HttpResponse.java index 3c8269f4400e7..b25df41698c79 100644 --- a/server/src/main/java/org/opensearch/http/HttpResponse.java +++ b/server/src/main/java/org/opensearch/http/HttpResponse.java @@ -32,12 +32,15 @@ package org.opensearch.http; +import org.opensearch.common.annotation.PublicApi; + /** * A basic http response abstraction. Http modules must implement this interface as the server package rest * handling needs to set http headers for a response.
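As context for the HttpResponse contract above, a fragment against the one method visible in this hunk, addHeader(String, String); the header name and value are placeholders:

    import org.opensearch.http.HttpResponse;

    void attachDeprecationWarning(HttpResponse response) {
        response.addHeader("Warning", "299 OpenSearch \"this endpoint is deprecated\"");
    }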
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface HttpResponse { void addHeader(String name, String value); diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index e29283724ebf8..545623287bae8 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -496,8 +496,9 @@ IndexEventListener freeze() { // pkg private for testing /** * Type of file system * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Type { HYBRIDFS("hybridfs"), NIOFS("niofs"), diff --git a/server/src/main/java/org/opensearch/index/IndexSortConfig.java b/server/src/main/java/org/opensearch/index/IndexSortConfig.java index 9edb268a5126c..af2e22c4aad53 100644 --- a/server/src/main/java/org/opensearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/opensearch/index/IndexSortConfig.java @@ -36,6 +36,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.index.fielddata.IndexFieldData; @@ -69,8 +70,10 @@ * * * - * @opensearch.internal -**/ + * + * @opensearch.api + */ +@PublicApi(since = "1.0.0") public final class IndexSortConfig { /** * The list of field names diff --git a/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java b/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java index b2b7781a20d26..a93a362a70c78 100644 --- a/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java +++ b/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java @@ -33,6 +33,7 @@ package org.opensearch.index; import org.apache.lucene.index.ConcurrentMergeScheduler; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; @@ -64,8 +65,9 @@ * move higher to allow merges to keep up with ongoing indexing. * * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class MergeSchedulerConfig { public static final Setting MAX_THREAD_COUNT_SETTING = new Setting<>( diff --git a/server/src/main/java/org/opensearch/index/ReplicationStats.java b/server/src/main/java/org/opensearch/index/ReplicationStats.java index 0ae4526365bf1..8987a492e9a90 100644 --- a/server/src/main/java/org/opensearch/index/ReplicationStats.java +++ b/server/src/main/java/org/opensearch/index/ReplicationStats.java @@ -8,6 +8,7 @@ package org.opensearch.index; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -20,8 +21,9 @@ * ReplicationStats is used to provide segment replication statistics at an index, * node and cluster level on a segment replication enabled cluster. 
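IndexSortConfig is constructed from index settings rather than directly by callers; declaring a sorted index at creation time is what feeds it. A sketch with an illustrative field name:

    import org.opensearch.common.settings.Settings;

    public class IndexSortSettingsExample {
        public static void main(String[] args) {
            // IndexSortConfig reads these settings when the index is created.
            Settings sortSettings = Settings.builder()
                .put("index.sort.field", "timestamp") // illustrative field
                .put("index.sort.order", "desc")
                .build();
            System.out.println(sortSettings.get("index.sort.field"));
        }
    }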
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.10.0") public class ReplicationStats implements ToXContentFragment, Writeable { public long maxBytesBehind; diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationPerGroupStats.java b/server/src/main/java/org/opensearch/index/SegmentReplicationPerGroupStats.java index c3b4f8217c961..884686ee48fa1 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationPerGroupStats.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationPerGroupStats.java @@ -8,6 +8,7 @@ package org.opensearch.index; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -21,8 +22,9 @@ /** * Return Segment Replication stats for a Replication Group. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class SegmentReplicationPerGroupStats implements Writeable, ToXContentFragment { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationShardStats.java b/server/src/main/java/org/opensearch/index/SegmentReplicationShardStats.java index 2be0c712f64ef..e381ade253422 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationShardStats.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationShardStats.java @@ -9,6 +9,7 @@ package org.opensearch.index; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -23,8 +24,9 @@ /** * SegRep stats for a single shard. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class SegmentReplicationShardStats implements Writeable, ToXContentFragment { private final String allocationId; private final long checkpointsBehindCount; diff --git a/server/src/main/java/org/opensearch/index/VersionType.java b/server/src/main/java/org/opensearch/index/VersionType.java index 8aa1fbd1b81ac..01cf73ca950d0 100644 --- a/server/src/main/java/org/opensearch/index/VersionType.java +++ b/server/src/main/java/org/opensearch/index/VersionType.java @@ -31,6 +31,7 @@ package org.opensearch.index; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -42,8 +43,9 @@ /** * Types of index versions * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum VersionType implements Writeable { INTERNAL((byte) 0) { @Override diff --git a/server/src/main/java/org/opensearch/index/analysis/AnalysisMode.java b/server/src/main/java/org/opensearch/index/analysis/AnalysisMode.java index 4385680d9eb93..af71b470711a0 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AnalysisMode.java +++ b/server/src/main/java/org/opensearch/index/analysis/AnalysisMode.java @@ -32,13 +32,16 @@ package org.opensearch.index.analysis; +import org.opensearch.common.annotation.PublicApi; + /** * Enum representing the mode in which token filters and analyzers are allowed to operate. 
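VersionType is usually seen on write requests, where EXTERNAL lets the caller drive optimistic concurrency. A sketch, with index name, id, and payload all illustrative:

    import java.util.Map;

    import org.opensearch.action.index.IndexRequest;
    import org.opensearch.index.VersionType;

    public class VersionTypeExample {
        public static void main(String[] args) {
            // EXTERNAL: the caller supplies versions; writes are rejected unless the new version is higher.
            IndexRequest request = new IndexRequest("my-index").id("1")
                .source(Map.of("field", "value"))
                .version(5)
                .versionType(VersionType.EXTERNAL);
            System.out.println(request.versionType());
        }
    }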
* While most token filters are allowed both in index and search time analyzers, some are * restricted to be used only at index time, others at search time. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum AnalysisMode { /** diff --git a/server/src/main/java/org/opensearch/index/analysis/AnalyzerScope.java b/server/src/main/java/org/opensearch/index/analysis/AnalyzerScope.java index 0ee51b32aab46..b24d932123b44 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AnalyzerScope.java +++ b/server/src/main/java/org/opensearch/index/analysis/AnalyzerScope.java @@ -32,11 +32,14 @@ package org.opensearch.index.analysis; +import org.opensearch.common.annotation.PublicApi; + /** * Enum to identify the scope of an analyzer * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum AnalyzerScope { INDEX, INDICES, diff --git a/server/src/main/java/org/opensearch/index/analysis/NameOrDefinition.java b/server/src/main/java/org/opensearch/index/analysis/NameOrDefinition.java index 8bc5c2ce916be..b425f17a85d6a 100644 --- a/server/src/main/java/org/opensearch/index/analysis/NameOrDefinition.java +++ b/server/src/main/java/org/opensearch/index/analysis/NameOrDefinition.java @@ -32,6 +32,7 @@ package org.opensearch.index.analysis; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -48,8 +49,9 @@ /** * Provides the name and settings for an analyzer * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NameOrDefinition implements Writeable, ToXContentFragment { // exactly one of these two members is not null public final String name; diff --git a/server/src/main/java/org/opensearch/index/analysis/NamedAnalyzer.java b/server/src/main/java/org/opensearch/index/analysis/NamedAnalyzer.java index 2a88d375a7df8..07523f9ee6dc5 100644 --- a/server/src/main/java/org/opensearch/index/analysis/NamedAnalyzer.java +++ b/server/src/main/java/org/opensearch/index/analysis/NamedAnalyzer.java @@ -34,6 +34,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.mapper.MapperException; import java.util.ArrayList; @@ -44,8 +45,9 @@ * Named analyzer is an analyzer wrapper around an actual analyzer ({@link #analyzer} that is associated * with a name ({@link #name()}. 
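NamedAnalyzer simply pairs a Lucene analyzer with the name and scope it was registered under, so construction is direct. A sketch using Lucene's StandardAnalyzer as an arbitrary delegate:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.opensearch.index.analysis.AnalyzerScope;
    import org.opensearch.index.analysis.NamedAnalyzer;

    public class NamedAnalyzerExample {
        public static void main(String[] args) {
            NamedAnalyzer analyzer = new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer());
            System.out.println(analyzer.name() + " scope=" + analyzer.scope());
        }
    }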
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NamedAnalyzer extends DelegatingAnalyzerWrapper { private final String name; diff --git a/server/src/main/java/org/opensearch/index/cache/query/QueryCacheStats.java b/server/src/main/java/org/opensearch/index/cache/query/QueryCacheStats.java index 88077aafb1495..d844e5cbb8897 100644 --- a/server/src/main/java/org/opensearch/index/cache/query/QueryCacheStats.java +++ b/server/src/main/java/org/opensearch/index/cache/query/QueryCacheStats.java @@ -33,6 +33,7 @@ package org.opensearch.index.cache.query; import org.apache.lucene.search.DocIdSet; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -46,8 +47,9 @@ /** * Stats for the query cache * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class QueryCacheStats implements Writeable, ToXContentFragment { private long ramBytesUsed; diff --git a/server/src/main/java/org/opensearch/index/cache/request/RequestCacheStats.java b/server/src/main/java/org/opensearch/index/cache/request/RequestCacheStats.java index 24f68899c2ac7..6def55fcb985b 100644 --- a/server/src/main/java/org/opensearch/index/cache/request/RequestCacheStats.java +++ b/server/src/main/java/org/opensearch/index/cache/request/RequestCacheStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.cache.request; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ /** * Request for the query cache statistics * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RequestCacheStats implements Writeable, ToXContentFragment { private long memorySize; diff --git a/server/src/main/java/org/opensearch/index/engine/CommitStats.java b/server/src/main/java/org/opensearch/index/engine/CommitStats.java index 4328ebed85aa1..b30ce720b2649 100644 --- a/server/src/main/java/org/opensearch/index/engine/CommitStats.java +++ b/server/src/main/java/org/opensearch/index/engine/CommitStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.engine; import org.apache.lucene.index.SegmentInfos; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.lucene.Lucene; import org.opensearch.core.common.io.stream.StreamInput; @@ -47,8 +48,9 @@ /** * a class the returns dynamic information with respect to the last commit point of this shard * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class CommitStats implements Writeable, ToXContentFragment { private final Map userData; diff --git a/server/src/main/java/org/opensearch/index/engine/Segment.java b/server/src/main/java/org/opensearch/index/engine/Segment.java index 41e6d79e8a4be..7881abcf58e0c 100644 --- a/server/src/main/java/org/opensearch/index/engine/Segment.java +++ b/server/src/main/java/org/opensearch/index/engine/Segment.java @@ -40,6 +40,7 @@ import org.apache.lucene.search.SortedSetSortField; import org.opensearch.Version; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.Lucene; import org.opensearch.core.common.io.stream.StreamInput; import 
org.opensearch.core.common.io.stream.StreamOutput; @@ -53,8 +54,9 @@ /** * A segment in the engine * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Segment implements Writeable { private String name; diff --git a/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java b/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java index f4fd2490c7abe..d4a97f0267222 100644 --- a/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java +++ b/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java @@ -33,6 +33,7 @@ package org.opensearch.index.engine; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -50,8 +51,9 @@ /** * Tracker for segment stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SegmentsStats implements Writeable, ToXContentFragment { private long count; diff --git a/server/src/main/java/org/opensearch/index/fielddata/FieldDataStats.java b/server/src/main/java/org/opensearch/index/fielddata/FieldDataStats.java index fc7db7d316214..85b435e969bfa 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/FieldDataStats.java +++ b/server/src/main/java/org/opensearch/index/fielddata/FieldDataStats.java @@ -34,6 +34,7 @@ import org.opensearch.common.FieldMemoryStats; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -47,8 +48,9 @@ /** * Encapsulates heap usage for field data * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FieldDataStats implements Writeable, ToXContentFragment { private static final String FIELDDATA = "fielddata"; diff --git a/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java b/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java index 5860cfe8a47c3..eb3462743593d 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java @@ -37,6 +37,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.opensearch.common.Numbers; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; import org.opensearch.common.time.DateUtils; @@ -60,8 +61,9 @@ * return as a single {@link ScriptDocValues} instance can be reused to return * values form multiple documents. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ScriptDocValues extends AbstractList { /** diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortedBinaryDocValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortedBinaryDocValues.java index bd66d1d94f33c..ba1b890f1ad1a 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SortedBinaryDocValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/SortedBinaryDocValues.java @@ -33,6 +33,7 @@ package org.opensearch.index.fielddata; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; @@ -41,8 +42,9 @@ * according to {@link BytesRef#compareTo(BytesRef)}. * There might be dups however. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") // TODO: Should it expose a count (current approach) or return null when there are no more values? public abstract class SortedBinaryDocValues { diff --git a/server/src/main/java/org/opensearch/index/flush/FlushStats.java b/server/src/main/java/org/opensearch/index/flush/FlushStats.java index aec6e9199729d..9bce46d1dd9d5 100644 --- a/server/src/main/java/org/opensearch/index/flush/FlushStats.java +++ b/server/src/main/java/org/opensearch/index/flush/FlushStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.flush; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ /** * Encapsulates statistics for flush * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FlushStats implements Writeable, ToXContentFragment { private long total; diff --git a/server/src/main/java/org/opensearch/index/get/GetResult.java b/server/src/main/java/org/opensearch/index/get/GetResult.java index f0f1a86cb16e7..c0dd1cd2ecb30 100644 --- a/server/src/main/java/org/opensearch/index/get/GetResult.java +++ b/server/src/main/java/org/opensearch/index/get/GetResult.java @@ -34,6 +34,7 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.document.DocumentField; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.Strings; @@ -68,6 +69,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetResult implements Writeable, Iterable, ToXContentObject { public static final String _INDEX = "_index"; diff --git a/server/src/main/java/org/opensearch/index/get/GetStats.java b/server/src/main/java/org/opensearch/index/get/GetStats.java index 5530a45717305..a366014fe228e 100644 --- a/server/src/main/java/org/opensearch/index/get/GetStats.java +++ b/server/src/main/java/org/opensearch/index/get/GetStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.get; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,6 +47,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetStats implements Writeable, ToXContentFragment { private long existsCount; diff --git a/server/src/main/java/org/opensearch/index/merge/MergeStats.java b/server/src/main/java/org/opensearch/index/merge/MergeStats.java index 
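SortedBinaryDocValues follows the standard Lucene doc-values iteration contract: advance to a document, then read its values in order. A sketch of a consumer (method name illustrative):

    import java.io.IOException;

    import org.apache.lucene.util.BytesRef;
    import org.opensearch.index.fielddata.SortedBinaryDocValues;

    public class DocValuesExample {
        // Values arrive sorted per BytesRef.compareTo and may contain duplicates.
        static void readValues(SortedBinaryDocValues values, int docId) throws IOException {
            if (values.advanceExact(docId)) {
                int count = values.docValueCount();
                for (int i = 0; i < count; i++) {
                    BytesRef value = values.nextValue();
                    System.out.println(value.utf8ToString());
                }
            }
        }
    }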
a284cec247ff1..7ecaed60735b4 100644 --- a/server/src/main/java/org/opensearch/index/merge/MergeStats.java +++ b/server/src/main/java/org/opensearch/index/merge/MergeStats.java @@ -33,6 +33,7 @@ package org.opensearch.index.merge; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ /** * Stores stats about a merge process * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MergeStats implements Writeable, ToXContentFragment { private long total; diff --git a/server/src/main/java/org/opensearch/index/query/IntervalMode.java b/server/src/main/java/org/opensearch/index/query/IntervalMode.java index 454d867b41da2..a2979135ac03f 100644 --- a/server/src/main/java/org/opensearch/index/query/IntervalMode.java +++ b/server/src/main/java/org/opensearch/index/query/IntervalMode.java @@ -9,6 +9,7 @@ package org.opensearch.index.query; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -18,8 +19,9 @@ /** * Mode for Text and Mapped Field Types * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum IntervalMode implements Writeable { ORDERED(0), UNORDERED(1), diff --git a/server/src/main/java/org/opensearch/index/query/QueryBuilder.java b/server/src/main/java/org/opensearch/index/query/QueryBuilder.java index 090f74c5be7fe..0cdf7f31c2ebf 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/QueryBuilder.java @@ -33,6 +33,7 @@ package org.opensearch.index.query; import org.apache.lucene.search.Query; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.xcontent.ToXContentObject; @@ -41,8 +42,9 @@ /** * Foundation class for all OpenSearch query builders * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface QueryBuilder extends NamedWriteable, ToXContentObject, Rewriteable { /** diff --git a/server/src/main/java/org/opensearch/index/query/QueryBuilderVisitor.java b/server/src/main/java/org/opensearch/index/query/QueryBuilderVisitor.java index af5a125f9dd95..b40dcca17e45b 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryBuilderVisitor.java +++ b/server/src/main/java/org/opensearch/index/query/QueryBuilderVisitor.java @@ -9,10 +9,14 @@ package org.opensearch.index.query; import org.apache.lucene.search.BooleanClause; +import org.opensearch.common.annotation.PublicApi; /** * QueryBuilderVisitor is an interface to define Visitor Object to be traversed in QueryBuilder tree. 
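QueryBuilderVisitor, added in 2.11, is the hook for walking a QueryBuilder tree. A minimal visitor that counts clauses, assuming the two-method interface shown in this hunk (accept plus a child visitor keyed by BooleanClause.Occur):

    import org.apache.lucene.search.BooleanClause;
    import org.opensearch.index.query.QueryBuilder;
    import org.opensearch.index.query.QueryBuilderVisitor;

    public class CountingVisitor implements QueryBuilderVisitor {
        private int count = 0;

        @Override
        public void accept(QueryBuilder qb) {
            count++; // called once per builder in the tree
        }

        @Override
        public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) {
            return this; // reuse the same visitor for nested clauses
        }

        public int count() {
            return count;
        }
    }

A query tree would then be walked with something like queryBuilder.visit(new CountingVisitor()), assuming the visit entry point that ships alongside this interface.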
+ * + * @opensearch.api */ +@PublicApi(since = "2.11.0") public interface QueryBuilderVisitor { /** diff --git a/server/src/main/java/org/opensearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/opensearch/index/query/QueryRewriteContext.java index 1f7805ec99cd4..15a6d0b5a774e 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/opensearch/index/query/QueryRewriteContext.java @@ -32,6 +32,7 @@ package org.opensearch.index.query; import org.opensearch.client.Client; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; @@ -46,8 +47,9 @@ /** * Context object used to rewrite {@link QueryBuilder} instances into simplified version. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class QueryRewriteContext { private final NamedXContentRegistry xContentRegistry; private final NamedWriteableRegistry writeableRegistry; diff --git a/server/src/main/java/org/opensearch/index/recovery/RecoveryStats.java b/server/src/main/java/org/opensearch/index/recovery/RecoveryStats.java index 675f7c08c827c..48fe75eb54280 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RecoveryStats.java +++ b/server/src/main/java/org/opensearch/index/recovery/RecoveryStats.java @@ -31,6 +31,7 @@ package org.opensearch.index.recovery; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ * Recovery related statistics, starting at the shard level and allowing aggregation to * indices and node level * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RecoveryStats implements ToXContentFragment, Writeable { private final AtomicInteger currentAsSource = new AtomicInteger(); diff --git a/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java b/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java index 322271ace82b5..489ac386f72a0 100644 --- a/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java +++ b/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.refresh; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ /** * Encapsulates stats for index refresh * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RefreshStats implements Writeable, ToXContentFragment { private long total; diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java index 4228ec60c4524..fc1f245829b84 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java @@ -10,6 +10,7 @@ import org.opensearch.Version; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -26,8 +27,9 @@ * Tracks remote store segment download and upload stats * Used for displaying remote store stats in IndicesStats/NodeStats API * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.10.0") public class RemoteSegmentStats implements Writeable, ToXContentFragment { /** * Cumulative bytes attempted to be uploaded to remote store diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java index fb65d9ef83be2..fe9440813b94f 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.Streak; import org.opensearch.common.util.concurrent.ConcurrentCollections; @@ -36,8 +37,9 @@ /** * Keeps track of remote refresh which happens in {@link org.opensearch.index.shard.RemoteStoreRefreshListener}. This consist of multiple critical metrics. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.10.0") public class RemoteSegmentTransferTracker extends RemoteTransferTracker { private final Logger logger; @@ -391,8 +393,9 @@ public RemoteSegmentTransferTracker.Stats stats() { /** * Represents the tracker's state as seen in the stats API. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "2.10.0") public static class Stats implements Writeable { public final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java index 4214a87049350..25bee10e1fbe3 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java @@ -8,6 +8,7 @@ package org.opensearch.index.remote; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.MovingAverage; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -22,8 +23,9 @@ /** * Stores Remote Translog Store-related stats for a given IndexShard. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.10.0") public class RemoteTranslogTransferTracker extends RemoteTransferTracker { /** * Epoch timestamp of the last successful Remote Translog Store upload. @@ -292,8 +294,9 @@ public String toString() { /** * Represents the tracker's state as seen in the stats API. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "2.10.0") public static class Stats implements Writeable { final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index 1f9144b28f286..b13f1eb04a941 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -36,6 +36,7 @@ import org.opensearch.action.search.SearchPhaseName; import org.opensearch.action.search.SearchRequestStats; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; @@ -55,15 +56,17 @@ /** * Encapsulates stats for search time * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchStats implements Writeable, ToXContentFragment { /** * Holds statistic values for a particular phase. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class PhaseStatsLongHolder implements Writeable { long current; @@ -110,8 +113,9 @@ public void writeTo(StreamOutput out) throws IOException { /** * Holds requests stats for different phases. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class RequestStatsLongHolder { Map requestStatsHolder = new HashMap<>(); @@ -130,9 +134,9 @@ public Map getRequestStatsHolder() { /** * Holder of statistics values * - * @opensearch.internal + * @opensearch.api */ - + @PublicApi(since = "1.0.0") public static class Stats implements Writeable, ToXContentFragment { private long queryCount; diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLease.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLease.java index 216b654f6ce6a..255ff115555d8 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLease.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLease.java @@ -32,6 +32,7 @@ package org.opensearch.index.seqno; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -51,8 +52,9 @@ * otherwise merge away operations that have been soft deleted). Each retention lease contains a unique identifier, the retaining sequence * number, the timestamp of when the lease was created or renewed, and the source of the retention lease (e.g., "ccr"). 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RetentionLease implements ToXContentObject, Writeable { private final String id; diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseStats.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseStats.java index 512ef9bf4f0a5..d34d385c66eb6 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseStats.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.seqno; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -45,8 +46,9 @@ /** * Represents retention lease stats. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RetentionLeaseStats implements ToXContentFragment, Writeable { private final RetentionLeases retentionLeases; diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeases.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeases.java index 74c4ddb6ebdb5..4e28e19c4b164 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLeases.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeases.java @@ -32,6 +32,7 @@ package org.opensearch.index.seqno; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -57,8 +58,9 @@ * Represents a versioned collection of retention leases. We version the collection of retention leases to ensure that sync requests that * arrive out of order on the replica, using the version to ensure that older sync requests are rejected. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RetentionLeases implements ToXContentFragment, Writeable { private final long primaryTerm; diff --git a/server/src/main/java/org/opensearch/index/seqno/SeqNoStats.java b/server/src/main/java/org/opensearch/index/seqno/SeqNoStats.java index be7888ada2801..a8acf1fac7846 100644 --- a/server/src/main/java/org/opensearch/index/seqno/SeqNoStats.java +++ b/server/src/main/java/org/opensearch/index/seqno/SeqNoStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.seqno; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ /** * Sequence number statistics * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SeqNoStats implements ToXContentFragment, Writeable { private static final String SEQ_NO = "seq_no"; diff --git a/server/src/main/java/org/opensearch/index/shard/DocsStats.java b/server/src/main/java/org/opensearch/index/shard/DocsStats.java index 83cc69752db2f..4ca475a45c04b 100644 --- a/server/src/main/java/org/opensearch/index/shard/DocsStats.java +++ b/server/src/main/java/org/opensearch/index/shard/DocsStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.shard; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ /** * Document statistics * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DocsStats implements Writeable, ToXContentFragment { private long count = 0; diff --git a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java index 89cbc59403faf..862962dc5467a 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java @@ -33,6 +33,7 @@ package org.opensearch.index.shard; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,22 +51,25 @@ /** * Tracks indexing statistics * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexingStats implements Writeable, ToXContentFragment { /** * Internal statistics for indexing * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Stats implements Writeable, ToXContentFragment { /** * Tracks item level rest category class codes during indexing * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class DocStatusStats implements Writeable, ToXContentFragment { final AtomicLong[] docStatusCounter; diff --git a/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java b/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java index 7ad48cb56a33b..2ab615677dedb 100644 --- a/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java @@ -9,6 +9,7 @@ package 
org.opensearch.index.store; import org.apache.lucene.store.Directory; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.MovingAverage; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -22,8 +23,9 @@ /** * Tracks the amount of bytes transferred between two {@link Directory} instances * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.10.0") public class DirectoryFileTransferTracker { /** * Cumulative size of files (in bytes) attempted to be transferred over from the source {@link Directory} @@ -166,8 +168,9 @@ public DirectoryFileTransferTracker.Stats stats() { /** * Represents the tracker's stats presentable to an API. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "2.10.0") public static class Stats implements Writeable { public final long transferredBytesStarted; public final long transferredBytesFailed; diff --git a/server/src/main/java/org/opensearch/index/store/StoreStats.java b/server/src/main/java/org/opensearch/index/store/StoreStats.java index aa73a2c629515..4763b5e5e8a21 100644 --- a/server/src/main/java/org/opensearch/index/store/StoreStats.java +++ b/server/src/main/java/org/opensearch/index/store/StoreStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.store; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ /** * Statistics about an OpenSearch Store * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class StoreStats implements Writeable, ToXContentFragment { /** diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheStats.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheStats.java index ebb6202c86ec9..070fd663896a3 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheStats.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheStats.java @@ -8,6 +8,7 @@ package org.opensearch.index.store.remote.filecache; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -20,8 +21,9 @@ /** * Statistics on file cache * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class FileCacheStats implements Writeable, ToXContentFragment { private final long timestamp; diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteTranslogStats.java b/server/src/main/java/org/opensearch/index/translog/RemoteTranslogStats.java index 966f8ebc2875a..03c15e0ea4752 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteTranslogStats.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteTranslogStats.java @@ -9,6 +9,7 @@ package org.opensearch.index.translog; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -23,8 +24,9 @@ /** * Encapsulates the stats related to Remote Translog Store operations * - * 
@opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.10.0") public class RemoteTranslogStats implements ToXContentFragment, Writeable { /** * Total number of Remote Translog Store uploads that have been started diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index b44aa6e059224..9f80bc38a8222 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -37,6 +37,7 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.stream.ReleasableBytesStreamOutput; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; @@ -1566,8 +1567,9 @@ public String toString() { /** * How to sync the translog * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Durability { /** diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogStats.java b/server/src/main/java/org/opensearch/index/translog/TranslogStats.java index a4699cea671a0..619dd6371c553 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogStats.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.translog; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ /** * Translog statistics * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TranslogStats implements Writeable, ToXContentFragment { private static final String TRANSLOG = "translog"; private long translogSizeInBytes; diff --git a/server/src/main/java/org/opensearch/index/warmer/WarmerStats.java b/server/src/main/java/org/opensearch/index/warmer/WarmerStats.java index 375dcc029fa70..789baaea20d04 100644 --- a/server/src/main/java/org/opensearch/index/warmer/WarmerStats.java +++ b/server/src/main/java/org/opensearch/index/warmer/WarmerStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.warmer; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ /** * Stats collected about the warmer * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class WarmerStats implements Writeable, ToXContentFragment { private long current; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java index 0f3025369833d..d91bfc19ee833 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.store.RateLimiter; import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import 
org.opensearch.common.settings.Setting.Property; @@ -48,8 +49,9 @@ /** * Settings for the recovery mechanism * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RecoverySettings { private static final Logger logger = LogManager.getLogger(RecoverySettings.class); diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java index 6733a29c19db3..80c05399df627 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -54,15 +55,17 @@ /** * Keeps track of state related to shard recovery. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RecoveryState implements ReplicationState, ToXContentFragment, Writeable { /** * The stage of the recovery state * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Stage { INIT((byte) 0), @@ -370,8 +373,9 @@ static final class Fields { /** * Verifys the lucene index * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class VerifyIndex extends ReplicationTimer implements ToXContentFragment, Writeable { private volatile long checkIndexTime; @@ -412,8 +416,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * The translog * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Translog extends ReplicationTimer implements ToXContentFragment, Writeable { public static final int UNKNOWN = -1; diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java index 9b5b1f4a1468e..5fa123948c5ac 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java @@ -10,6 +10,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -29,15 +30,17 @@ /** * ReplicationState implementation to track Segment Replication events. 
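RecoverySettings is backed by dynamic cluster settings, so the usual way to influence it is a cluster settings update rather than direct construction. A sketch with an illustrative throttle value:

    import org.opensearch.common.settings.Settings;

    public class RecoverySettingsExample {
        public static void main(String[] args) {
            // Dynamic setting consumed by RecoverySettings; applied via a cluster settings update in practice.
            Settings update = Settings.builder()
                .put("indices.recovery.max_bytes_per_sec", "60mb") // illustrative value
                .build();
            System.out.println(update.get("indices.recovery.max_bytes_per_sec"));
        }
    }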
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.2.0") public class SegmentReplicationState implements ReplicationState, ToXContentFragment, Writeable { /** * The stage of the recovery state * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "2.2.0") public enum Stage { DONE((byte) 0), INIT((byte) 1), diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationLuceneIndex.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationLuceneIndex.java index 5793356678715..cc52d2bafc614 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationLuceneIndex.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationLuceneIndex.java @@ -8,6 +8,7 @@ package org.opensearch.indices.replication.common; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.io.stream.StreamInput; @@ -32,8 +33,9 @@ * Represents the Lucene Index (set of files on a single shard) involved * in the replication process. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ReplicationLuceneIndex extends ReplicationTimer implements ToXContentFragment, Writeable { private final FilesDetails filesDetails; @@ -393,8 +395,9 @@ public boolean isComplete() { /** * Metadata about a file * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class FileMetadata implements ToXContentObject, Writeable { private String name; private long length; diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTimer.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTimer.java index aeb9ef29f4a52..d884e1676f2be 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTimer.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTimer.java @@ -8,6 +8,7 @@ package org.opensearch.indices.replication.common; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -19,8 +20,9 @@ * A serializable timer that is used to measure the time taken for * file replication operations like recovery. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ReplicationTimer implements Writeable { private long startTime = 0; private long startNanoTime = 0; diff --git a/server/src/main/java/org/opensearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/opensearch/ingest/PipelineConfiguration.java index d59d9d6da5717..477be3e74f1c7 100644 --- a/server/src/main/java/org/opensearch/ingest/PipelineConfiguration.java +++ b/server/src/main/java/org/opensearch/ingest/PipelineConfiguration.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; @@ -56,8 +57,9 @@ /** * Encapsulates a pipeline's id and configuration as a blob * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class PipelineConfiguration extends AbstractDiffable implements ToXContentObject { private static final ObjectParser PARSER = new ObjectParser<>("pipeline_config", true, Builder::new); diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java index 8446ab0dd6166..fc1bb86f2ad3e 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -51,15 +52,17 @@ /** * FileSystem information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FsInfo implements Iterable, Writeable, ToXContentFragment { /** * Path for the file system * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Path implements Writeable, ToXContentObject { String path; @@ -220,8 +223,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * The device status. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class DeviceStats implements Writeable, ToXContentFragment { final int majorDeviceNumber; @@ -463,8 +467,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * The I/O statistics. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class IoStats implements Writeable, ToXContentFragment { private static final String OPERATIONS = "operations"; diff --git a/server/src/main/java/org/opensearch/monitor/os/OsStats.java b/server/src/main/java/org/opensearch/monitor/os/OsStats.java index 697b86c6ba55c..cdcec733cb086 100644 --- a/server/src/main/java/org/opensearch/monitor/os/OsStats.java +++ b/server/src/main/java/org/opensearch/monitor/os/OsStats.java @@ -34,6 +34,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -48,8 +49,9 @@ /** * Holds stats for the Operating System * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OsStats implements Writeable, ToXContentFragment { private final long timestamp; @@ -143,8 +145,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * CPU Information. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Cpu implements Writeable, ToXContentFragment { private final short percent; @@ -208,8 +211,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Swap information. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Swap implements Writeable, ToXContentFragment { private static final Logger logger = LogManager.getLogger(Swap.class); @@ -276,8 +280,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * OS Memory information. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Mem implements Writeable, ToXContentFragment { private static final Logger logger = LogManager.getLogger(Mem.class); @@ -353,8 +358,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Encapsulates basic cgroup statistics. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Cgroup implements Writeable, ToXContentFragment { private final String cpuAcctControlGroup; @@ -546,8 +552,9 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa /** * Encapsulates CPU time statistics. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class CpuStat implements Writeable, ToXContentFragment { private final long numberOfElapsedPeriods; diff --git a/server/src/main/java/org/opensearch/plugins/CryptoKeyProviderPlugin.java b/server/src/main/java/org/opensearch/plugins/CryptoKeyProviderPlugin.java index 47dc75c7c908a..d9b5f0d79ec8c 100644 --- a/server/src/main/java/org/opensearch/plugins/CryptoKeyProviderPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/CryptoKeyProviderPlugin.java @@ -14,7 +14,8 @@ /** * Crypto plugin to provide support for custom key providers. 
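Nearly all of the stats classes in this change share the Writeable-plus-ToXContentFragment shape, so a serialization round trip looks the same for each. A sketch using RefreshStats together with the BytesStreamOutput made public at the top of this patch; the no-arg and StreamInput constructors are the conventional Writeable pattern and assumed here:

    import org.opensearch.common.io.stream.BytesStreamOutput;
    import org.opensearch.core.common.io.stream.StreamInput;
    import org.opensearch.index.refresh.RefreshStats;

    public class WriteableRoundTrip {
        public static void main(String[] args) throws Exception {
            RefreshStats stats = new RefreshStats();
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                stats.writeTo(out);                           // serialize
                try (StreamInput in = out.bytes().streamInput()) {
                    RefreshStats copy = new RefreshStats(in); // deserialize
                    System.out.println(copy.getTotal());
                }
            }
        }
    }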
- * @opensearch.api + * + * @opensearch.experimental */ @ExperimentalApi public interface CryptoKeyProviderPlugin { diff --git a/server/src/main/java/org/opensearch/plugins/PluginInfo.java b/server/src/main/java/org/opensearch/plugins/PluginInfo.java index 67c0542943e95..dc8fd6e604d72 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginInfo.java +++ b/server/src/main/java/org/opensearch/plugins/PluginInfo.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.bootstrap.JarHell; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -59,6 +60,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PluginInfo implements Writeable, ToXContentObject { public static final String OPENSEARCH_PLUGIN_PROPERTIES = "plugin-descriptor.properties"; diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesStats.java b/server/src/main/java/org/opensearch/repositories/RepositoriesStats.java index b24e0dddd852a..cfcbc6bb88e66 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesStats.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesStats.java @@ -8,6 +8,7 @@ package org.opensearch.repositories; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -19,8 +20,11 @@ import java.util.List; /** - * Encapsulates stats for multiple repositories* + * Encapsulates stats for multiple repositories + * + * @opensearch.api */ +@PublicApi(since = "2.11.0") public class RepositoriesStats implements Writeable, ToXContentObject { List repositoryStatsSnapshots; diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryCleanupResult.java b/server/src/main/java/org/opensearch/repositories/RepositoryCleanupResult.java index f09aae195f0e5..eeddf4bd76659 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryCleanupResult.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryCleanupResult.java @@ -31,6 +31,7 @@ package org.opensearch.repositories; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.DeleteResult; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; @@ -47,8 +48,9 @@ /** * Result of a repository cleanup action * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RepositoryCleanupResult implements Writeable, ToXContentObject { public static final ObjectParser PARSER = new ObjectParser<>( diff --git a/server/src/main/java/org/opensearch/rest/RestRequest.java b/server/src/main/java/org/opensearch/rest/RestRequest.java index f64774686c89d..2c397f7fc6e8e 100644 --- a/server/src/main/java/org/opensearch/rest/RestRequest.java +++ b/server/src/main/java/org/opensearch/rest/RestRequest.java @@ -37,6 +37,7 @@ import org.opensearch.common.CheckedConsumer; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.LoggingDeprecationHandler; @@ -72,6 +73,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestRequest implements 
ToXContent.Params { // tchar pattern as defined by RFC7230 section 3.2.6 @@ -231,8 +233,9 @@ public static RestRequest requestWithoutParameters( /** * The method used. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Method { GET, POST, diff --git a/server/src/main/java/org/opensearch/script/ScriptCacheStats.java b/server/src/main/java/org/opensearch/script/ScriptCacheStats.java index be7a1c9542a60..d06ffa70235f8 100644 --- a/server/src/main/java/org/opensearch/script/ScriptCacheStats.java +++ b/server/src/main/java/org/opensearch/script/ScriptCacheStats.java @@ -32,6 +32,7 @@ package org.opensearch.script; +import org.opensearch.common.annotation.DeprecatedApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -52,6 +53,7 @@ * * @deprecated This class is deprecated in favor of ScriptStats and ScriptContextStats. It is removed in OpenSearch 2.0. */ +@DeprecatedApi(since = "2.0.0") @Deprecated public class ScriptCacheStats implements Writeable, ToXContentFragment { private final Map context; diff --git a/server/src/main/java/org/opensearch/script/ScriptContextInfo.java b/server/src/main/java/org/opensearch/script/ScriptContextInfo.java index d3a64c207e0ce..ee4c0aa09388e 100644 --- a/server/src/main/java/org/opensearch/script/ScriptContextInfo.java +++ b/server/src/main/java/org/opensearch/script/ScriptContextInfo.java @@ -32,6 +32,7 @@ package org.opensearch.script; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -60,8 +61,9 @@ /** * Information about a script context * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ScriptContextInfo implements ToXContentObject, Writeable { public final String name; public final ScriptMethodInfo execute; @@ -202,8 +204,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Script method information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ScriptMethodInfo implements ToXContentObject, Writeable { public final String name, returnType; public final List parameters; diff --git a/server/src/main/java/org/opensearch/script/ScriptContextStats.java b/server/src/main/java/org/opensearch/script/ScriptContextStats.java index 16f4a7e313326..15a5596609586 100644 --- a/server/src/main/java/org/opensearch/script/ScriptContextStats.java +++ b/server/src/main/java/org/opensearch/script/ScriptContextStats.java @@ -32,6 +32,7 @@ package org.opensearch.script; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ /** * Stats for a script context * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ScriptContextStats implements Writeable, ToXContentFragment, Comparable { private final String context; private final long compilations; diff --git a/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java b/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java index 30756ff702e8f..1a6626db64811 100644 --- a/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java +++ 
b/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java @@ -32,6 +32,7 @@ package org.opensearch.script; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; @@ -91,8 +92,9 @@ * } * * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ScriptLanguagesInfo implements ToXContentObject, Writeable { private static final ParseField TYPES_ALLOWED = new ParseField("types_allowed"); private static final ParseField LANGUAGE_CONTEXTS = new ParseField("language_contexts"); diff --git a/server/src/main/java/org/opensearch/script/ScriptStats.java b/server/src/main/java/org/opensearch/script/ScriptStats.java index aeb3645242799..850f7f9d07070 100644 --- a/server/src/main/java/org/opensearch/script/ScriptStats.java +++ b/server/src/main/java/org/opensearch/script/ScriptStats.java @@ -32,6 +32,7 @@ package org.opensearch.script; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -48,8 +49,9 @@ /** * Stats for scripts * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ScriptStats implements Writeable, ToXContentFragment { private final List contextStats; private final long compilations; diff --git a/server/src/main/java/org/opensearch/script/StoredScriptSource.java b/server/src/main/java/org/opensearch/script/StoredScriptSource.java index d1dae67d0e55f..fc19022657f9e 100644 --- a/server/src/main/java/org/opensearch/script/StoredScriptSource.java +++ b/server/src/main/java/org/opensearch/script/StoredScriptSource.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; @@ -65,8 +66,9 @@ * {@link StoredScriptSource} represents user-defined parameters for a script * saved in the {@link ClusterState}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class StoredScriptSource extends AbstractDiffable implements Writeable, ToXContentObject { /** diff --git a/server/src/main/java/org/opensearch/search/Scroll.java b/server/src/main/java/org/opensearch/search/Scroll.java index 8003316a57e5e..3e634a378717f 100644 --- a/server/src/main/java/org/opensearch/search/Scroll.java +++ b/server/src/main/java/org/opensearch/search/Scroll.java @@ -32,6 +32,7 @@ package org.opensearch.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ * A scroll enables scrolling of search request. It holds a {@link #keepAlive()} time that * will control how long to keep the scrolling resources open. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Scroll implements Writeable { private final TimeValue keepAlive; diff --git a/server/src/main/java/org/opensearch/search/SearchExtBuilder.java b/server/src/main/java/org/opensearch/search/SearchExtBuilder.java index 1df58666f6fdb..557269a1e45b1 100644 --- a/server/src/main/java/org/opensearch/search/SearchExtBuilder.java +++ b/server/src/main/java/org/opensearch/search/SearchExtBuilder.java @@ -33,6 +33,7 @@ package org.opensearch.search; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -56,8 +57,9 @@ * * @see SearchExtSpec * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class SearchExtBuilder implements NamedWriteable, ToXContentFragment { public abstract int hashCode(); diff --git a/server/src/main/java/org/opensearch/search/builder/PointInTimeBuilder.java b/server/src/main/java/org/opensearch/search/builder/PointInTimeBuilder.java index c50f3c75213dd..20bdb71fd1923 100644 --- a/server/src/main/java/org/opensearch/search/builder/PointInTimeBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/PointInTimeBuilder.java @@ -33,6 +33,7 @@ package org.opensearch.search.builder; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; @@ -50,8 +51,9 @@ * A search request with a point in time will execute using the reader contexts associated with that point time * instead of the latest reader contexts. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.3.0") public final class PointInTimeBuilder implements Writeable, ToXContentObject { private static final ParseField ID_FIELD = new ParseField("id"); private static final ParseField KEEP_ALIVE_FIELD = new ParseField("keep_alive"); diff --git a/server/src/main/java/org/opensearch/search/collapse/CollapseBuilder.java b/server/src/main/java/org/opensearch/search/collapse/CollapseBuilder.java index 73802cbd7a849..a9ff55b3a90b1 100644 --- a/server/src/main/java/org/opensearch/search/collapse/CollapseBuilder.java +++ b/server/src/main/java/org/opensearch/search/collapse/CollapseBuilder.java @@ -31,6 +31,7 @@ package org.opensearch.search.collapse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.Strings; @@ -57,8 +58,9 @@ /** * A builder that enables field collapsing on search request. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CollapseBuilder implements Writeable, ToXContentObject { public static final ParseField FIELD_FIELD = new ParseField("field"); public static final ParseField INNER_HITS_FIELD = new ParseField("inner_hits"); diff --git a/server/src/main/java/org/opensearch/search/collapse/CollapseContext.java b/server/src/main/java/org/opensearch/search/collapse/CollapseContext.java index 09a612a15d762..79b90d92f4daa 100644 --- a/server/src/main/java/org/opensearch/search/collapse/CollapseContext.java +++ b/server/src/main/java/org/opensearch/search/collapse/CollapseContext.java @@ -33,6 +33,7 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.grouping.CollapsingTopDocsCollector; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.NumberFieldMapper; @@ -43,8 +44,9 @@ /** * Context used for field collapsing * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CollapseContext { private final String fieldName; private final MappedFieldType fieldType; diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourceContext.java b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourceContext.java index f0ebfd6ab510a..337576890e663 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourceContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourceContext.java @@ -33,6 +33,7 @@ package org.opensearch.search.fetch.subphase; import org.opensearch.common.Booleans; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -55,8 +56,9 @@ /** * Context used to fetch the {@code _source}. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FetchSourceContext implements Writeable, ToXContentObject { public static final ParseField INCLUDES_FIELD = new ParseField("includes", "include"); diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java index cc0723ed7a432..0e7c3cf30ccec 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.vectorhighlight.SimpleBoundaryScanner; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -68,8 +69,9 @@ * * @see org.opensearch.search.builder.SearchSourceBuilder#highlight() * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class HighlightBuilder extends AbstractHighlighterBuilder { /** default for whether to highlight fields based on the source even if stored separately */ public static final boolean DEFAULT_FORCE_SOURCE = false; @@ -476,8 +478,9 @@ public HighlightBuilder rewrite(QueryRewriteContext ctx) throws IOException { /** * Field for highlight builder * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Field extends AbstractHighlighterBuilder { static final NamedObjectParser PARSER; static { @@ -584,8 +587,9 @@ public Field rewrite(QueryRewriteContext ctx) throws IOException { /** * Order for highlight builder * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Order implements Writeable { NONE, SCORE; @@ -615,8 +619,9 @@ public String toString() { /** * Boundary scanner type * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum BoundaryScannerType implements Writeable { CHARS, WORD, diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/SearchHighlightContext.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/SearchHighlightContext.java index 7464ba094b97e..c3d34dbdef56c 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/SearchHighlightContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/SearchHighlightContext.java @@ -33,6 +33,7 @@ package org.opensearch.search.fetch.subphase.highlight; import org.apache.lucene.search.Query; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder.BoundaryScannerType; import java.util.Arrays; @@ -46,8 +47,9 @@ /** * Search context used during highlighting phase * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchHighlightContext { private final Map fields; @@ -82,8 +84,9 @@ public boolean forceSource(Field field) { /** * Field for the search highlight context * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Field { private final String field; private final FieldOptions fieldOptions; @@ -107,8 +110,9 @@ public FieldOptions fieldOptions() { /** * Field options for the search highlight context * - * 
@opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class FieldOptions { // Field options that default to null or -1 are often set to their real default in HighlighterParseElement#parse diff --git a/server/src/main/java/org/opensearch/search/internal/AliasFilter.java b/server/src/main/java/org/opensearch/search/internal/AliasFilter.java index c1d425078049c..1732c0ab0db8a 100644 --- a/server/src/main/java/org/opensearch/search/internal/AliasFilter.java +++ b/server/src/main/java/org/opensearch/search/internal/AliasFilter.java @@ -32,6 +32,7 @@ package org.opensearch.search.internal; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ /** * Represents a {@link QueryBuilder} and a list of alias names that filters the builder is composed of. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class AliasFilter implements Writeable, Rewriteable { private final String[] aliases; diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java b/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java index bc82814e88bbd..2ed770be60458 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java @@ -11,6 +11,7 @@ import org.opensearch.Version; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; @@ -33,7 +34,10 @@ * TODO: Copied verbatim from {@link org.opensearch.ingest.PipelineConfiguration}. *

    * See if we can refactor into a common class. I suspect not, just because this one will hold + * + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class PipelineConfiguration extends AbstractDiffable implements ToXContentObject { private static final ObjectParser PARSER = new ObjectParser<>( "pipeline_config", diff --git a/server/src/main/java/org/opensearch/search/profile/NetworkTime.java b/server/src/main/java/org/opensearch/search/profile/NetworkTime.java index 45d8c2883cb4a..89a8836d807ae 100644 --- a/server/src/main/java/org/opensearch/search/profile/NetworkTime.java +++ b/server/src/main/java/org/opensearch/search/profile/NetworkTime.java @@ -9,6 +9,7 @@ package org.opensearch.search.profile; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -18,8 +19,9 @@ /** * Utility class to track time of network operations * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NetworkTime implements Writeable { private long inboundNetworkTime; private long outboundNetworkTime; diff --git a/server/src/main/java/org/opensearch/search/profile/ProfileResult.java b/server/src/main/java/org/opensearch/search/profile/ProfileResult.java index 6019bc0479c21..2c0d2cf3ba78a 100644 --- a/server/src/main/java/org/opensearch/search/profile/ProfileResult.java +++ b/server/src/main/java/org/opensearch/search/profile/ProfileResult.java @@ -33,6 +33,7 @@ package org.opensearch.search.profile; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; @@ -64,8 +65,9 @@ * Each InternalProfileResult has a List of InternalProfileResults, which will contain * "children" queries if applicable * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ProfileResult implements Writeable, ToXContentObject { static final ParseField TYPE = new ParseField("type"); static final ParseField DESCRIPTION = new ParseField("description"); diff --git a/server/src/main/java/org/opensearch/search/profile/ProfileShardResult.java b/server/src/main/java/org/opensearch/search/profile/ProfileShardResult.java index 502d8e4852588..8ff622152ee70 100644 --- a/server/src/main/java/org/opensearch/search/profile/ProfileShardResult.java +++ b/server/src/main/java/org/opensearch/search/profile/ProfileShardResult.java @@ -32,6 +32,7 @@ package org.opensearch.search.profile; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -46,8 +47,9 @@ /** * Shard level profile results * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ProfileShardResult implements Writeable { private final List queryProfileResults; diff --git a/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResult.java b/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResult.java index 046e929821ab5..6cb92b3efaac3 100644 --- a/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResult.java +++ 
b/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResult.java @@ -32,6 +32,7 @@ package org.opensearch.search.profile.aggregation; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -51,8 +52,9 @@ * A container class to hold the profile results for a single shard in the request. * Contains a list of query profiles, a collector tree and a total rewrite tree. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class AggregationProfileShardResult implements Writeable, ToXContentFragment { public static final String AGGREGATIONS = "aggregations"; diff --git a/server/src/main/java/org/opensearch/search/profile/query/CollectorResult.java b/server/src/main/java/org/opensearch/search/profile/query/CollectorResult.java index 59d35ddd1a8d0..5cb6942445638 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/CollectorResult.java +++ b/server/src/main/java/org/opensearch/search/profile/query/CollectorResult.java @@ -33,6 +33,7 @@ package org.opensearch.search.profile.query; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; @@ -55,8 +56,9 @@ * Collectors used in the search. Children CollectorResult's may be * embedded inside of a parent CollectorResult * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CollectorResult implements ToXContentObject, Writeable { public static final String REASON_SEARCH_COUNT = "search_count"; diff --git a/server/src/main/java/org/opensearch/search/profile/query/QueryProfileShardResult.java b/server/src/main/java/org/opensearch/search/profile/query/QueryProfileShardResult.java index a9f3d4aaf7885..76d6229159698 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/QueryProfileShardResult.java +++ b/server/src/main/java/org/opensearch/search/profile/query/QueryProfileShardResult.java @@ -32,6 +32,7 @@ package org.opensearch.search.profile.query; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -51,8 +52,9 @@ * A container class to hold the profile results for a single shard in the request. * Contains a list of query profiles, a collector tree and a total rewrite tree. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class QueryProfileShardResult implements Writeable, ToXContentObject { public static final String COLLECTOR = "collector"; diff --git a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java index 8df44e3958083..c9b8a896ed525 100644 --- a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java @@ -40,7 +40,7 @@ import org.opensearch.cluster.routing.GroupShardsIterator; import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.set.Sets; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; @@ -76,12 +76,10 @@ * Otherwise the provided field must be a numeric and doc_values must be enabled. In that case a * {@link org.opensearch.search.slice.DocValuesSliceQuery} is used to filter the results. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SliceBuilder implements Writeable, ToXContentObject { - - private static final DeprecationLogger DEPRECATION_LOG = DeprecationLogger.getLogger(SliceBuilder.class); - public static final ParseField FIELD_FIELD = new ParseField("field"); public static final ParseField ID_FIELD = new ParseField("id"); public static final ParseField MAX_FIELD = new ParseField("max"); diff --git a/server/src/main/java/org/opensearch/search/sort/SortOrder.java b/server/src/main/java/org/opensearch/search/sort/SortOrder.java index ed83a0667c5e7..f4b6701c91f58 100644 --- a/server/src/main/java/org/opensearch/search/sort/SortOrder.java +++ b/server/src/main/java/org/opensearch/search/sort/SortOrder.java @@ -32,6 +32,7 @@ package org.opensearch.search.sort; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -43,8 +44,9 @@ /** * A sorting order. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum SortOrder implements Writeable { /** * Ascending order. diff --git a/server/src/main/java/org/opensearch/search/suggest/Suggest.java b/server/src/main/java/org/opensearch/search/suggest/Suggest.java index abd73b45ecc13..b8f2f9b7279cf 100644 --- a/server/src/main/java/org/opensearch/search/suggest/Suggest.java +++ b/server/src/main/java/org/opensearch/search/suggest/Suggest.java @@ -34,6 +34,7 @@ import org.apache.lucene.util.CollectionUtil; import org.opensearch.common.CheckedFunction; import org.opensearch.common.SetOnce; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.Strings; @@ -70,8 +71,9 @@ /** * Top level suggest result, containing the result for each suggestion. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Suggest implements Iterable<Suggest.Suggestion<? extends Entry<? extends Option>>>, Writeable, ToXContentFragment { public static final String NAME = "suggest"; @@ -232,7 +234,9 @@ public int hashCode() { /** * The suggestion responses corresponding with the suggestions in the request.
+ * @opensearch.api */ + @PublicApi(since = "1.0.0") public abstract static class Suggestion<T extends Suggestion.Entry> implements Iterable<T>, NamedWriteable, ToXContentFragment { public static final int TYPE = 0; @@ -423,7 +427,10 @@ protected static <E extends Suggestion.Entry<? extends Option>> void parseEntries( /** * Represents a part from the suggest text with suggested options. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public abstract static class Entry<O extends Option> implements Iterable<O>, Writeable, ToXContentFragment { private static final String TEXT = "text"; @@ -611,7 +618,10 @@ protected static void declareCommonFields(ObjectParser { protected abstract Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> innerExecute( diff --git a/server/src/main/java/org/opensearch/search/suggest/SuggestionBuilder.java b/server/src/main/java/org/opensearch/search/suggest/SuggestionBuilder.java index 05574bbe06fa6..ee465fc527ea3 100644 --- a/server/src/main/java/org/opensearch/search/suggest/SuggestionBuilder.java +++ b/server/src/main/java/org/opensearch/search/suggest/SuggestionBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.analysis.Analyzer; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.BytesRefs; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -54,8 +55,9 @@ /** * Base class for the different suggestion implementations. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class SuggestionBuilder<T extends SuggestionBuilder<T>> implements NamedWriteable, ToXContentFragment { protected final String field; diff --git a/server/src/main/java/org/opensearch/search/suggest/SuggestionSearchContext.java b/server/src/main/java/org/opensearch/search/suggest/SuggestionSearchContext.java index f0d8efc64b6b3..32f72d1115973 100644 --- a/server/src/main/java/org/opensearch/search/suggest/SuggestionSearchContext.java +++ b/server/src/main/java/org/opensearch/search/suggest/SuggestionSearchContext.java @@ -33,6 +33,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.query.QueryShardContext; import java.util.LinkedHashMap; @@ -41,8 +42,9 @@ /** * Context used for suggestion based search * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SuggestionSearchContext { private final Map<String, SuggestionContext> suggestions = new LinkedHashMap<>(4); @@ -58,8 +60,9 @@ public Map<String, SuggestionContext> suggestions() { /** * The suggestion context * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public abstract static class SuggestionContext { private BytesRef text; diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionStats.java b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionStats.java index b54f988218ac8..ad91e75c591f3 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionStats.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionStats.java @@ -33,6 +33,7 @@ import org.opensearch.common.FieldMemoryStats; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -45,8 +46,9 @@ /** * Stats for completion suggester * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class
CompletionStats implements Writeable, ToXContentFragment { private static final String COMPLETION = "completion"; diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreInfo.java b/server/src/main/java/org/opensearch/snapshots/RestoreInfo.java index 5a628eb7f04a0..2ccbf308705bc 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreInfo.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreInfo.java @@ -31,6 +31,7 @@ package org.opensearch.snapshots; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -53,8 +54,9 @@ *

    * Returned as part of {@link org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestoreInfo implements ToXContentObject, Writeable { private String name; diff --git a/server/src/main/java/org/opensearch/snapshots/Snapshot.java b/server/src/main/java/org/opensearch/snapshots/Snapshot.java index e7c92195eff08..4dd930c7b59c0 100644 --- a/server/src/main/java/org/opensearch/snapshots/Snapshot.java +++ b/server/src/main/java/org/opensearch/snapshots/Snapshot.java @@ -32,6 +32,7 @@ package org.opensearch.snapshots; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -42,8 +43,9 @@ /** * Basic information about a snapshot - a SnapshotId and the repository that the snapshot belongs to. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Snapshot implements Writeable { private final String repository; diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotId.java b/server/src/main/java/org/opensearch/snapshots/SnapshotId.java index aec3aebd93585..4eeb956a0cb19 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotId.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotId.java @@ -32,6 +32,7 @@ package org.opensearch.snapshots; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ /** * SnapshotId - snapshot name + snapshot UUID * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SnapshotId implements Comparable, Writeable, ToXContentObject { private static final String NAME = "name"; diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java index 41452671e6a91..191b872cdd563 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.ParseField; @@ -64,8 +65,9 @@ /** * Information about a snapshot * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SnapshotInfo implements Comparable, ToXContent, Writeable { public static final String CONTEXT_MODE_PARAM = "context_mode"; diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardFailure.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardFailure.java index 9d47a4edbfa22..5efcd5f12e37b 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardFailure.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardFailure.java @@ -35,6 +35,7 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; 
import org.opensearch.core.ParseField; import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; @@ -52,8 +53,9 @@ /** * Stores information about failures that occurred during shard snapshotting process * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotShardFailure extends ShardOperationFailedException { @Nullable diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotState.java b/server/src/main/java/org/opensearch/snapshots/SnapshotState.java index dd1b3ebb8404d..7ad838f741a3f 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotState.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotState.java @@ -32,11 +32,14 @@ package org.opensearch.snapshots; +import org.opensearch.common.annotation.PublicApi; + /** * Represents the state that a snapshot can be in * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum SnapshotState { /** * Snapshot process has started diff --git a/server/src/main/java/org/opensearch/tasks/Task.java b/server/src/main/java/org/opensearch/tasks/Task.java index ee2eae60eae8f..a21a454a65d0e 100644 --- a/server/src/main/java/org/opensearch/tasks/Task.java +++ b/server/src/main/java/org/opensearch/tasks/Task.java @@ -32,9 +32,8 @@ package org.opensearch.tasks; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.action.NotifyOnceListener; import org.opensearch.core.common.io.stream.NamedWriteable; @@ -62,12 +61,10 @@ /** * Current task information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Task { - - private static final Logger logger = LogManager.getLogger(Task.class); - /** * The request header to mark tasks with specific ids */ @@ -501,7 +498,10 @@ public boolean supportsResourceTracking() { * can change this on version upgrade but we should be careful * because some statuses (reindex) have become defacto standardized because * they are used by systems like Kibana. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Status extends ToXContentObject, NamedWriteable {} /** diff --git a/server/src/main/java/org/opensearch/tasks/TaskInfo.java b/server/src/main/java/org/opensearch/tasks/TaskInfo.java index b51240f234e39..3a04e8e4072b2 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskInfo.java +++ b/server/src/main/java/org/opensearch/tasks/TaskInfo.java @@ -33,6 +33,7 @@ package org.opensearch.tasks; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; @@ -66,8 +67,9 @@ * and use in APIs. Instead, immutable and writeable TaskInfo objects are used to represent * snapshot information about currently running tasks. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class TaskInfo implements Writeable, ToXContentFragment { private final TaskId taskId; diff --git a/server/src/main/java/org/opensearch/tasks/TaskResult.java b/server/src/main/java/org/opensearch/tasks/TaskResult.java index f42e833f5ca08..846fbde48ea59 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskResult.java +++ b/server/src/main/java/org/opensearch/tasks/TaskResult.java @@ -34,6 +34,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.client.Requests; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; @@ -63,8 +64,9 @@ * Information about a running task or a task that stored its result. Running tasks just have a {@link #getTask()} while * tasks with stored result will have either a {@link #getError()} or {@link #getResponse()}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class TaskResult implements Writeable, ToXContentObject { private final boolean completed; private final TaskInfo task; diff --git a/server/src/main/java/org/opensearch/threadpool/ExecutorBuilder.java b/server/src/main/java/org/opensearch/threadpool/ExecutorBuilder.java index d26131484ba71..d57b74b3a45e0 100644 --- a/server/src/main/java/org/opensearch/threadpool/ExecutorBuilder.java +++ b/server/src/main/java/org/opensearch/threadpool/ExecutorBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.threadpool; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchExecutors; @@ -44,8 +45,9 @@ * * @param the underlying type of the executor settings * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ExecutorBuilder { private final String name; diff --git a/server/src/main/java/org/opensearch/threadpool/Scheduler.java b/server/src/main/java/org/opensearch/threadpool/Scheduler.java index 4a7c63a1b559a..9733db29f5939 100644 --- a/server/src/main/java/org/opensearch/threadpool/Scheduler.java +++ b/server/src/main/java/org/opensearch/threadpool/Scheduler.java @@ -34,6 +34,7 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; @@ -154,7 +155,10 @@ static ScheduledCancellable wrapAsScheduledCancellable(ScheduledFuture schedu /** * This interface represents an object whose execution may be cancelled during runtime. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface Cancellable { /** @@ -171,7 +175,10 @@ interface Cancellable { /** * A scheduled cancellable allow cancelling and reading the remaining delay of a scheduled task. 
+ * + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface ScheduledCancellable extends Delayed, Cancellable {} /** diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 5f10986239300..c825ecc8abe9f 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -122,8 +122,9 @@ public static class Names { /** * The threadpool type. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum ThreadPoolType { DIRECT("direct"), FIXED("fixed"), @@ -734,8 +735,9 @@ ExecutorService executor() { /** * The thread pool information. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Info implements Writeable, ToXContentFragment { private final String name; diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPoolInfo.java b/server/src/main/java/org/opensearch/threadpool/ThreadPoolInfo.java index f49ad34e70119..49d961f0b506f 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPoolInfo.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPoolInfo.java @@ -32,6 +32,7 @@ package org.opensearch.threadpool; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.service.ReportingService; @@ -45,8 +46,9 @@ /** * Information about a threadpool * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ThreadPoolInfo implements ReportingService.Info, Iterable { private final List infos; diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPoolStats.java b/server/src/main/java/org/opensearch/threadpool/ThreadPoolStats.java index 7b4c1504d927a..968c2cc4c4887 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPoolStats.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPoolStats.java @@ -33,6 +33,7 @@ package org.opensearch.threadpool; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -49,15 +50,17 @@ /** * Stats for a threadpool * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ThreadPoolStats implements Writeable, ToXContentFragment, Iterable { /** * The statistics. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Stats implements Writeable, ToXContentFragment, Comparable { private final String name; diff --git a/server/src/main/java/org/opensearch/watcher/ResourceWatcher.java b/server/src/main/java/org/opensearch/watcher/ResourceWatcher.java index 0c656c52388df..a54ba60815883 100644 --- a/server/src/main/java/org/opensearch/watcher/ResourceWatcher.java +++ b/server/src/main/java/org/opensearch/watcher/ResourceWatcher.java @@ -31,6 +31,8 @@ package org.opensearch.watcher; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; /** @@ -39,8 +41,9 @@ * Different resource watchers can be registered with {@link ResourceWatcherService} to be called * periodically in order to check for changes in different external resources. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ResourceWatcher { /** * Called once when the resource watcher is added to {@link ResourceWatcherService} diff --git a/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java b/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java index 62002f9c6c323..519d7c6e68a57 100644 --- a/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java +++ b/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java @@ -64,8 +64,9 @@ public class ResourceWatcherService implements Closeable { /** * Frequency level to watch. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Frequency { /** diff --git a/server/src/main/java/org/opensearch/watcher/WatcherHandle.java b/server/src/main/java/org/opensearch/watcher/WatcherHandle.java index 6890364cd0cd6..dd8f898e11860 100644 --- a/server/src/main/java/org/opensearch/watcher/WatcherHandle.java +++ b/server/src/main/java/org/opensearch/watcher/WatcherHandle.java @@ -32,11 +32,14 @@ package org.opensearch.watcher; +import org.opensearch.common.annotation.PublicApi; + /** * Handle to a watcher * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class WatcherHandle { private final ResourceWatcherService.ResourceMonitor monitor; From dcb64d8facde23587da9b09e92e587b68d022e94 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Fri, 10 Nov 2023 12:35:29 -0600 Subject: [PATCH 139/178] Make the gradle check comment more compact (#11151) Continuing in the spirit of #9699, this makes the gradle check comment less verbose while containing all the same information. The goal is to reduce visual noise on the PR feed and maybe allow for more comments before GitHub starts hiding older comments behind the "load more" link. This is super subjective so happy to hear any opinions if the previous format is preferred. Signed-off-by: Andrew Ross --- .github/workflows/gradle-check.yml | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 31578b3fc4660..8c33d41c6b2b4 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -82,10 +82,7 @@ jobs: with: issue-number: ${{ env.pr_number }} body: | - ### Gradle Check (Jenkins) Run Completed with: - * **RESULT:** ${{ env.result }} :white_check_mark: - * **URL:** ${{ env.workflow_url }} - * **CommitID:** ${{ env.pr_from_sha }} + :white_check_mark: Gradle check result for ${{ env.pr_from_sha }}: [${{ env.result }}](${{ env.workflow_url }}) - name: Extract Test Failure if: ${{ github.event_name == 'pull_request_target' && env.result != 'SUCCESS' }} @@ -108,10 +105,8 @@ jobs: with: issue-number: ${{ env.pr_number }} body: | - ### Gradle Check (Jenkins) Run Completed with: - * **RESULT:** ${{ env.result }} :grey_exclamation: ${{ env.test_failures }} - * **URL:** ${{ env.workflow_url }} - * **CommitID:** ${{ env.pr_from_sha }} + :grey_exclamation: Gradle check result for ${{ env.pr_from_sha }}: [${{ env.result }}](${{ env.workflow_url }}) ${{ env.test_failures }} + Please review all [flaky tests](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) that succeeded after retry and create an issue if one does not already exist to track the flaky failure. 
- name: Create Comment Failure @@ -120,12 +115,9 @@ jobs: with: issue-number: ${{ env.pr_number }} body: | - ### Gradle Check (Jenkins) Run Completed with: - * **RESULT:** ${{ env.result }} :x: ${{ env.test_failures }} - * **URL:** ${{ env.workflow_url }} - * **CommitID:** ${{ env.pr_from_sha }} - Please examine the workflow log, locate, and copy-paste the failure(s) below, then iterate to green. - Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change? + :x: Gradle check result for ${{ env.pr_from_sha }}: [${{ env.result }}](${{ env.workflow_url }}) + + Please examine the workflow log, locate, and copy-paste the failure(s) below, then iterate to green. Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change? - name: Create Issue On Push Failure if: ${{ github.event_name == 'push' && failure() }} From 2bd709bf201dde294e21d9959d951bb7f9e5e40a Mon Sep 17 00:00:00 2001 From: Neetika Singhal Date: Fri, 10 Nov 2023 14:13:30 -0800 Subject: [PATCH 140/178] update index random function to fix the bogus documents deletion (#11142) Signed-off-by: Neetika Singhal --- .../basic/TransportTwoNodesSearchIT.java | 1 + .../highlight/HighlighterSearchIT.java | 16 ---------------- .../test/OpenSearchIntegTestCase.java | 19 +++++++++++++------ 3 files changed, 14 insertions(+), 22 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java index ce5f3f63faa66..edceb0cbc0d24 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java @@ -128,6 +128,7 @@ private Set prepareData(int numShards) throws Exception { fullExpectedIds.add(Integer.toString(i)); } refresh(); + indexRandomForConcurrentSearch("test"); return fullExpectedIds; } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index f7bc5eb75ad0f..2afa911223074 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -2013,10 +2013,6 @@ private static String randomStoreField() { } public void testHighlightNoMatchSize() throws IOException, InterruptedException { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10900", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); assertAcked( prepareCreate("test").setMapping( "text", @@ -2128,10 +2124,6 @@ public void testHighlightNoMatchSize() throws IOException, InterruptedException } public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException, InterruptedException { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10900", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); assertAcked( prepareCreate("test").setMapping( 
"text", @@ -2228,10 +2220,6 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException, } public void testHighlightNoMatchSizeNumberOfFragments() throws IOException, InterruptedException { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10900", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); assertAcked( prepareCreate("test").setMapping( "text", @@ -3416,10 +3404,6 @@ public void testFiltersFunctionScoreQueryHighlight() throws Exception { } public void testHighlightQueryRewriteDatesWithNow() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10434", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); assertAcked( client().admin() .indices() diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 0c6c81103922f..7614cd0e8f920 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -1667,10 +1667,6 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma } assertThat(actualErrors, emptyIterable()); - if (dummyDocuments) { - bogusIds.addAll(indexRandomForMultipleSlices(indicesArray)); - } - if (!bogusIds.isEmpty()) { // delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs! for (List doc : bogusIds) { @@ -1686,6 +1682,9 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma client().admin().indices().prepareRefresh(indicesArray).setIndicesOptions(IndicesOptions.lenientExpandOpen()).get() ); } + if (dummyDocuments) { + indexRandomForMultipleSlices(indicesArray); + } } /* @@ -1694,7 +1693,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma * multiple slices based on segment count. * @param indices the indices in which bogus documents should be ingested * */ - protected Set> indexRandomForMultipleSlices(String... indices) throws InterruptedException { + protected void indexRandomForMultipleSlices(String... indices) throws InterruptedException { Set> bogusIds = new HashSet<>(); int refreshCount = randomIntBetween(2, 3); for (String index : indices) { @@ -1731,7 +1730,15 @@ protected Set> indexRandomForMultipleSlices(String... 
indices) thro refresh(index); } } - return bogusIds; + for (List doc : bogusIds) { + assertEquals( + "failed to delete a dummy doc [" + doc.get(0) + "][" + doc.get(1) + "]", + DocWriteResponse.Result.DELETED, + client().prepareDelete(doc.get(0), doc.get(1)).setRouting(doc.get(1)).get().getResult() + ); + } + // refresh is called to make sure the bogus docs doesn't affect the search results + refresh(); } private final AtomicInteger dummmyDocIdGenerator = new AtomicInteger(); From 0452d1487fd76640c5345fa92aa1ce9b1bacbc88 Mon Sep 17 00:00:00 2001 From: Niyati Aggarwal <121826855+niyatiagg@users.noreply.github.com> Date: Sat, 11 Nov 2023 16:45:34 -0800 Subject: [PATCH 141/178] Adding support for dynamically updating Leader/follower checker timeouts (#10528) * making leader check timeout dynamic Signed-off-by: Niyati Aggarwal * making follower check timeout dynamic Signed-off-by: Niyati Aggarwal * fixing existing unit tests Signed-off-by: Niyati Aggarwal * fixing checkstyle violations Signed-off-by: Niyati Aggarwal * adding tests for leader/follower check timeout Signed-off-by: Niyati Aggarwal * setting maximum and minimum timeout value for leader/follower checker Signed-off-by: Niyati Aggarwal * adding tests for checking boundary cases Signed-off-by: Niyati Aggarwal * Fixing checkstyle violations Signed-off-by: Niyati Aggarwal * changed the log file and added other suggested changes Signed-off-by: Niyati Aggarwal * fixing checkstyle violations Signed-off-by: Niyati Aggarwal * Addressing review comments Signed-off-by: Niyati Aggarwal * addressing proposed changes Signed-off-by: Niyati Aggarwal * Applying checkstyle fixes Signed-off-by: Niyati Aggarwal * Fixing flakiness for existing tests Signed-off-by: Niyati Aggarwal * Applying checkstyle fixes Signed-off-by: Niyati Aggarwal * Fixing the timeout value limits for randomSettings Signed-off-by: Niyati Aggarwal --------- Signed-off-by: Niyati Aggarwal --- CHANGELOG.md | 1 + .../cluster/coordination/Coordinator.java | 3 +- .../coordination/FollowersChecker.java | 14 ++- .../cluster/coordination/LeaderChecker.java | 13 +- .../transport/TransportChannel.java | 2 +- .../CoordinationCheckerSettingsTests.java | 113 ++++++++++++++++++ .../coordination/FollowersCheckerTests.java | 29 +++-- .../coordination/LeaderCheckerTests.java | 12 +- 8 files changed, 164 insertions(+), 23 deletions(-) create mode 100644 server/src/test/java/org/opensearch/cluster/coordination/CoordinationCheckerSettingsTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index e8f27b9323306..e45eb3870b522 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -129,6 +129,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [BUG] Disable sort optimization for HALF_FLOAT ([#10999](https://github.com/opensearch-project/OpenSearch/pull/10999)) - Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) - Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087)) +- Made leader/follower check timeout setting dynamic ([#10528](https://github.com/opensearch-project/OpenSearch/pull/10528)) - Use iterative approach to evaluate Regex.simpleMatch ([#11060](https://github.com/opensearch-project/OpenSearch/pull/11060)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 
a4ffab7fb70c9..5a07f964f94a4 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -261,9 +261,10 @@ public Coordinator( this::handlePublishRequest, this::handleApplyCommit ); - this.leaderChecker = new LeaderChecker(settings, transportService, this::onLeaderFailure, nodeHealthService); + this.leaderChecker = new LeaderChecker(settings, clusterSettings, transportService, this::onLeaderFailure, nodeHealthService); this.followersChecker = new FollowersChecker( settings, + clusterSettings, transportService, this::onFollowerCheckRequest, this::removeNode, diff --git a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java index f69a4f771cf21..70bb0515bb022 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.coordination.Coordinator.Mode; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -98,7 +99,9 @@ public class FollowersChecker { "cluster.fault_detection.follower_check.timeout", TimeValue.timeValueMillis(10000), TimeValue.timeValueMillis(1), - Setting.Property.NodeScope + TimeValue.timeValueMillis(60000), + Setting.Property.NodeScope, + Setting.Property.Dynamic ); // the number of failed checks that must happen before the follower is considered to have failed. @@ -112,7 +115,7 @@ public class FollowersChecker { private final Settings settings; private final TimeValue followerCheckInterval; - private final TimeValue followerCheckTimeout; + private TimeValue followerCheckTimeout; private final int followerCheckRetryCount; private final BiConsumer<DiscoveryNode, String> onNodeFailure; private final Consumer<FollowerCheckRequest> handleRequestAndUpdateState; @@ -127,6 +130,7 @@ public class FollowersChecker { public FollowersChecker( Settings settings, + ClusterSettings clusterSettings, TransportService transportService, Consumer<FollowerCheckRequest> handleRequestAndUpdateState, BiConsumer<DiscoveryNode, String> onNodeFailure, @@ -141,7 +145,7 @@ public FollowersChecker( followerCheckInterval = FOLLOWER_CHECK_INTERVAL_SETTING.get(settings); followerCheckTimeout = FOLLOWER_CHECK_TIMEOUT_SETTING.get(settings); followerCheckRetryCount = FOLLOWER_CHECK_RETRY_COUNT_SETTING.get(settings); - + clusterSettings.addSettingsUpdateConsumer(FOLLOWER_CHECK_TIMEOUT_SETTING, this::setFollowerCheckTimeout); updateFastResponseState(0, Mode.CANDIDATE); transportService.registerRequestHandler( FOLLOWER_CHECK_ACTION_NAME, @@ -159,6 +163,10 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti }); } + private void setFollowerCheckTimeout(TimeValue followerCheckTimeout) { + this.followerCheckTimeout = followerCheckTimeout; + } + /** * Update the set of known nodes, starting to check any new ones and stopping checking any previously-known-but-now-unknown ones.
*/ diff --git a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java index 69ba1f977f326..8d4373b865f62 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Nullable; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -97,7 +98,9 @@ public class LeaderChecker { "cluster.fault_detection.leader_check.timeout", TimeValue.timeValueMillis(10000), TimeValue.timeValueMillis(1), - Setting.Property.NodeScope + TimeValue.timeValueMillis(60000), + Setting.Property.NodeScope, + Setting.Property.Dynamic ); // the number of failed checks that must happen before the leader is considered to have failed. @@ -111,7 +114,7 @@ public class LeaderChecker { private final Settings settings; private final TimeValue leaderCheckInterval; - private final TimeValue leaderCheckTimeout; + private TimeValue leaderCheckTimeout; private final int leaderCheckRetryCount; private final TransportService transportService; private final Consumer<Exception> onLeaderFailure; @@ -123,6 +126,7 @@ public class LeaderChecker { LeaderChecker( final Settings settings, + final ClusterSettings clusterSettings, final TransportService transportService, final Consumer<Exception> onLeaderFailure, NodeHealthService nodeHealthService @@ -134,6 +138,7 @@ public class LeaderChecker { this.transportService = transportService; this.onLeaderFailure = onLeaderFailure; this.nodeHealthService = nodeHealthService; + clusterSettings.addSettingsUpdateConsumer(LEADER_CHECK_TIMEOUT_SETTING, this::setLeaderCheckTimeout); transportService.registerRequestHandler( LEADER_CHECK_ACTION_NAME, @@ -155,6 +160,10 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti }); } + private void setLeaderCheckTimeout(TimeValue leaderCheckTimeout) { + this.leaderCheckTimeout = leaderCheckTimeout; + } + public DiscoveryNode leader() { CheckScheduler checkScheduler = currentChecker.get(); return checkScheduler == null ? null : checkScheduler.leader; diff --git a/server/src/main/java/org/opensearch/transport/TransportChannel.java b/server/src/main/java/org/opensearch/transport/TransportChannel.java index 7423d59103302..f84ee5dc745c3 100644 --- a/server/src/main/java/org/opensearch/transport/TransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TransportChannel.java @@ -82,7 +82,7 @@ static void sendErrorResponse(TransportChannel channel, String actionName, Trans /** * Returns the contextual property associated with this specific transport channel (the - * implementation of how such properties are managed depends on the the particular + * implementation of how such properties are managed depends on the particular * transport engine).
* * @param name the name of the property diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationCheckerSettingsTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationCheckerSettingsTests.java new file mode 100644 index 0000000000000..56bd2d94dce84 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationCheckerSettingsTests.java @@ -0,0 +1,113 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.coordination; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import static org.opensearch.cluster.coordination.FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING; +import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING; +import static org.opensearch.common.unit.TimeValue.timeValueSeconds; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +public class CoordinationCheckerSettingsTests extends OpenSearchSingleNodeTestCase { + public void testFollowerCheckTimeoutValueUpdate() { + Setting<TimeValue> setting1 = FOLLOWER_CHECK_TIMEOUT_SETTING; + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "60s").build(); + try { + ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(timeSettings1) + .execute() + .actionGet(); + + assertAcked(response); + assertEquals(timeValueSeconds(60), setting1.get(response.getPersistentSettings())); + } finally { + // cleanup + timeSettings1 = Settings.builder().putNull(setting1.getKey()).build(); + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + } + + public void testFollowerCheckTimeoutMaxValue() { + Setting<TimeValue> setting1 = FOLLOWER_CHECK_TIMEOUT_SETTING; + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "61s").build(); + + assertThrows( + "failed to parse value [61s] for setting [" + setting1.getKey() + "], must be <= [60000ms]", + IllegalArgumentException.class, + () -> { + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + ); + } + + public void testFollowerCheckTimeoutMinValue() { + Setting<TimeValue> setting1 = FOLLOWER_CHECK_TIMEOUT_SETTING; + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "0s").build(); + + assertThrows( + "failed to parse value [0s] for setting [" + setting1.getKey() + "], must be >= [1ms]", + IllegalArgumentException.class, + () -> { + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + ); + } + + public void testLeaderCheckTimeoutValueUpdate() { + Setting<TimeValue> setting1 = LEADER_CHECK_TIMEOUT_SETTING; + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "60s").build(); + try { + ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(timeSettings1) + .execute() + .actionGet(); + assertAcked(response); + assertEquals(timeValueSeconds(60), setting1.get(response.getPersistentSettings())); + } finally {
// cleanup + timeSettings1 = Settings.builder().putNull(setting1.getKey()).build(); + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + } + + public void testLeaderCheckTimeoutMaxValue() { + Setting<TimeValue> setting1 = LEADER_CHECK_TIMEOUT_SETTING; + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "61s").build(); + + assertThrows( + "failed to parse value [61s] for setting [" + setting1.getKey() + "], must be <= [60000ms]", + IllegalArgumentException.class, + () -> { + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + ); + } + + public void testLeaderCheckTimeoutMinValue() { + Setting<TimeValue> setting1 = LEADER_CHECK_TIMEOUT_SETTING; + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "0s").build(); + + assertThrows( + "failed to parse value [0s] for setting [" + setting1.getKey() + "], must be >= [1ms]", + IllegalArgumentException.class, + () -> { + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + ); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java index c152a1606681e..a106706c00732 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; import org.opensearch.core.common.io.stream.StreamInput; @@ -96,7 +97,7 @@ public class FollowersCheckerTests extends OpenSearchTestCase { public void testChecksExpectedNodes() { final DiscoveryNode localNode = new DiscoveryNode("local-node", buildNewFakeTransportAddress(), Version.CURRENT); final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), localNode.getName()).build(); - + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DiscoveryNodes[] discoveryNodesHolder = new DiscoveryNodes[] { DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).build() }; @@ -132,6 +133,7 @@ protected void onSendRequest(long requestId, String action, TransportRequest req final FollowersChecker followersChecker = new FollowersChecker( settings, + clusterSettings, transportService, fcr -> { assert false : fcr; }, (node, reason) -> { @@ -257,6 +259,7 @@ public void testFailsNodeThatDisconnects() { final DiscoveryNode localNode = new DiscoveryNode("local-node", buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode otherNode = new DiscoveryNode("other-node", buildNewFakeTransportAddress(), Version.CURRENT); final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), localNode.getName()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); final MockTransport mockTransport = new MockTransport() { @@ -297,6 +300,7 @@ public String toString() { final
FollowersChecker followersChecker = new FollowersChecker( settings, + clusterSettings, transportService, fcr -> { assert false : fcr; }, (node, reason) -> { @@ -336,6 +340,7 @@ private void testBehaviourOfFailingNode( final DiscoveryNode localNode = new DiscoveryNode("local-node", buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode otherNode = new DiscoveryNode("other-node", buildNewFakeTransportAddress(), Version.CURRENT); final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), localNode.getName()).put(testSettings).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); final MockTransport mockTransport = new MockTransport() { @@ -384,6 +389,7 @@ public String toString() { final FollowersChecker followersChecker = new FollowersChecker( settings, + clusterSettings, transportService, fcr -> { assert false : fcr; }, (node, reason) -> { @@ -464,6 +470,7 @@ public void testUnhealthyNodeRejectsImmediately() { final DiscoveryNode leader = new DiscoveryNode("leader", buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode follower = new DiscoveryNode("follower", buildNewFakeTransportAddress(), Version.CURRENT); final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), follower.getName()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); final MockTransport mockTransport = new MockTransport() { @@ -488,7 +495,7 @@ protected void onSendRequest(long requestId, String action, TransportRequest req final AtomicBoolean calledCoordinator = new AtomicBoolean(); final AtomicReference<RuntimeException> coordinatorException = new AtomicReference<>(); - final FollowersChecker followersChecker = new FollowersChecker(settings, transportService, fcr -> { + final FollowersChecker followersChecker = new FollowersChecker(settings, clusterSettings, transportService, fcr -> { assertTrue(calledCoordinator.compareAndSet(false, true)); final RuntimeException exception = coordinatorException.get(); if (exception != null) { @@ -536,6 +543,7 @@ public void testResponder() { final DiscoveryNode leader = new DiscoveryNode("leader", buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode follower = new DiscoveryNode("follower", buildNewFakeTransportAddress(), Version.CURRENT); final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), follower.getName()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); final MockTransport mockTransport = new MockTransport() { @@ -560,7 +568,7 @@ protected void onSendRequest(long requestId, String action, TransportRequest req final AtomicBoolean calledCoordinator = new AtomicBoolean(); final AtomicReference<RuntimeException> coordinatorException = new AtomicReference<>(); - final FollowersChecker followersChecker = new FollowersChecker(settings, transportService, fcr -> { + final FollowersChecker followersChecker = new FollowersChecker(settings, clusterSettings, transportService, fcr -> { assertTrue(calledCoordinator.compareAndSet(false, true)); final RuntimeException exception = coordinatorException.get(); if
(exception != null) { @@ -700,6 +708,7 @@ public void testPreferClusterManagerNodes() { DiscoveryNodes discoveryNodes = discoNodesBuilder.localNodeId(nodes.get(0).getId()).build(); CapturingTransport capturingTransport = new CapturingTransport(); final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), nodes.get(0).getName()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); TransportService transportService = capturingTransport.createTransportService( Settings.EMPTY, @@ -710,15 +719,9 @@ public void testPreferClusterManagerNodes() { emptySet(), NoopTracer.INSTANCE ); - final FollowersChecker followersChecker = new FollowersChecker( - Settings.EMPTY, - transportService, - fcr -> { assert false : fcr; }, - (node, reason) -> { - assert false : node; - }, - () -> new StatusInfo(HEALTHY, "healthy-info") - ); + final FollowersChecker followersChecker = new FollowersChecker(Settings.EMPTY, clusterSettings, transportService, fcr -> { + assert false : fcr; + }, (node, reason) -> { assert false : node; }, () -> new StatusInfo(HEALTHY, "healthy-info")); followersChecker.setCurrentNodes(discoveryNodes); List<DiscoveryNode> followerTargets = Stream.of(capturingTransport.getCapturedRequestsAndClear()) .map(cr -> cr.node) @@ -754,7 +757,7 @@ private static Settings randomSettings() { settingsBuilder.put(FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), randomIntBetween(100, 100000) + "ms"); } if (randomBoolean()) { - settingsBuilder.put(FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 100000) + "ms"); + settingsBuilder.put(FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 60000) + "ms"); } return settingsBuilder.build(); } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java index 8915f4c5c1274..fe65058333116 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.coordination.LeaderChecker.LeaderCheckRequest; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.transport.TransportResponse; @@ -117,6 +118,7 @@ public void testFollowerBehaviour() { final AtomicBoolean allResponsesFail = new AtomicBoolean(); final Settings settings = settingsBuilder.build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); logger.info("--> using {}", settings); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); @@ -174,7 +176,7 @@ public String toString() { final AtomicBoolean leaderFailed = new AtomicBoolean(); - final LeaderChecker leaderChecker = new LeaderChecker(settings, transportService, e -> { + final LeaderChecker leaderChecker = new LeaderChecker(settings, clusterSettings, transportService, e -> { assertThat(e.getMessage(), matchesRegex("node \\[.*\\] failed \\[[1-9][0-9]*\\] consecutive checks")); assertTrue(leaderFailed.compareAndSet(false, true)); }, () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info")); @@ -242,6 +244,7 @@ public void testFollowerFailsImmediatelyOnDisconnection() { final Response[] responseHolder = new Response[] { Response.SUCCESS }; final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), localNode.getId()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); final MockTransport mockTransport = new MockTransport() { @Override @@ -290,7 +293,7 @@ public String toString() { transportService.acceptIncomingRequests(); final AtomicBoolean leaderFailed = new AtomicBoolean(); - final LeaderChecker leaderChecker = new LeaderChecker(settings, transportService, e -> { + final LeaderChecker leaderChecker = new LeaderChecker(settings, clusterSettings, transportService, e -> { assertThat(e.getMessage(), anyOf(endsWith("disconnected"), endsWith("disconnected during check"))); assertTrue(leaderFailed.compareAndSet(false, true)); }, () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info")); @@ -357,6 +360,7 @@ public void testFollowerFailsImmediatelyOnHealthCheckFailure() { final Response[] responseHolder = new Response[] { Response.SUCCESS }; final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), localNode.getId()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); final MockTransport mockTransport = new MockTransport() { @Override @@ -403,7 +407,7 @@ public String toString() { transportService.acceptIncomingRequests(); final AtomicBoolean leaderFailed = new AtomicBoolean(); - final LeaderChecker leaderChecker = new LeaderChecker(settings, transportService, e -> { + final LeaderChecker leaderChecker = new LeaderChecker(settings, clusterSettings, transportService, e -> { assertThat(e.getMessage(), endsWith("failed health checks")); assertTrue(leaderFailed.compareAndSet(false, true)); }, () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info")); @@ -432,6 +436,7 @@ public void testLeaderBehaviour() { final DiscoveryNode localNode = new DiscoveryNode("local-node", buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode otherNode = new DiscoveryNode("other-node", buildNewFakeTransportAddress(), Version.CURRENT); final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), localNode.getId()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); final CapturingTransport capturingTransport = new CapturingTransport(); AtomicReference<StatusInfo> nodeHealthServiceStatus = new AtomicReference<>(new StatusInfo(UNHEALTHY, "unhealthy-info")); @@ -450,6 +455,7 @@ public void testLeaderBehaviour() { final LeaderChecker leaderChecker = new LeaderChecker( settings, + clusterSettings, transportService, e -> fail("shouldn't be checking anything"), () -> nodeHealthServiceStatus.get()
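This patch makes cluster.fault_detection.leader_check.timeout and cluster.fault_detection.follower_check.timeout updatable at runtime, clamped to the 1ms-60s bounds enforced above. A minimal sketch of raising both on a live cluster, modeled on the new CoordinationCheckerSettingsTests; the 30s value and the injected Client handle are illustrative assumptions, not part of the patch:

import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.opensearch.client.Client;
import org.opensearch.common.settings.Settings;

import static org.opensearch.cluster.coordination.FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING;
import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING;

class FaultDetectionTimeoutTuner {
    // Persistently raise both fault-detection timeouts to an assumed 30s.
    // Values outside the 1ms..60000ms bounds introduced by this change are rejected.
    static ClusterUpdateSettingsResponse raiseTimeouts(Client client) {
        Settings timeouts = Settings.builder()
            .put(FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "30s")
            .put(LEADER_CHECK_TIMEOUT_SETTING.getKey(), "30s")
            .build();
        return client.admin().cluster().prepareUpdateSettings().setPersistentSettings(timeouts).execute().actionGet();
    }
}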
From b01e4831e5725fc51194f074deb90168c72def56 Mon Sep 17 00:00:00 2001 From: Ashish Date: Mon, 13 Nov 2023 02:10:01 +0530 Subject: [PATCH 142/178] [Remote Translog] Fix translog inconsistent generation assertion failure in ITs (#10985) --- .../index/translog/RemoteFsTranslog.java | 14
++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 65d16e213cad1..8fb420e8fa1da 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -380,12 +380,14 @@ public boolean syncNeeded() { public void close() throws IOException { assert Translog.calledFromOutsideOrViaTragedyClose() : shardId + "Translog.close method is called from inside Translog, but not via closeOnTragicEvent method"; - if (closed.compareAndSet(false, true)) { - try (ReleasableLock lock = writeLock.acquire()) { - sync(); - } finally { - logger.debug("translog closed"); - closeFilesIfNoPendingRetentionLocks(); + try (ReleasableLock lock = writeLock.acquire()) { + if (closed.compareAndSet(false, true)) { + try { + sync(); + } finally { + logger.debug("translog closed"); + closeFilesIfNoPendingRetentionLocks(); + } } } }
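The fix above closes a race: close() used to flip the closed flag before taking the write lock, so the final sync() could run while a concurrent operation was still mutating the translog, tripping the generation assertion. A self-contained sketch of the corrected ordering; sync() and closeResources() are simplified stand-ins for the real translog internals, not the actual RemoteFsTranslog code:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class CloseUnderLockSketch {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private final AtomicBoolean closed = new AtomicBoolean();

    void sync() { /* upload the current translog generation */ }

    void closeResources() { /* release file handles */ }

    // Taking the write lock *before* flipping the flag guarantees any
    // concurrent writer finishes first, so the final sync() sees a
    // consistent generation instead of racing an in-flight mutation.
    void close() throws IOException {
        lock.writeLock().lock();
        try {
            if (closed.compareAndSet(false, true)) {
                try {
                    sync();
                } finally {
                    closeResources();
                }
            }
        } finally {
            lock.writeLock().unlock();
        }
    }
}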
From 8077a26e709660f522ca78420d7d38a39139030f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Nov 2023 11:41:39 -0500 Subject: [PATCH 143/178] Bump stefanzweifel/git-auto-commit-action from 4 to 5 (#11171) * Bump stefanzweifel/git-auto-commit-action from 4 to 5 Bumps [stefanzweifel/git-auto-commit-action](https://github.com/stefanzweifel/git-auto-commit-action) from 4 to 5. - [Release notes](https://github.com/stefanzweifel/git-auto-commit-action/releases) - [Changelog](https://github.com/stefanzweifel/git-auto-commit-action/blob/master/CHANGELOG.md) - [Commits](https://github.com/stefanzweifel/git-auto-commit-action/compare/v4...v5) --- updated-dependencies: - dependency-name: stefanzweifel/git-auto-commit-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- .github/workflows/dependabot_pr.yml | 6 +++--- CHANGELOG.md | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml index 2a5e539b214d3..e6feb3b852ad0 100644 --- a/.github/workflows/dependabot_pr.yml +++ b/.github/workflows/dependabot_pr.yml @@ -27,7 +27,7 @@ jobs: ./gradlew updateSHAs - name: Commit the changes - uses: stefanzweifel/git-auto-commit-action@v4.7.2 + uses: stefanzweifel/git-auto-commit-action@v5 with: commit_message: Updating SHAs branch: ${{ github.head_ref }} @@ -40,7 +40,7 @@ jobs: ./gradlew spotlessApply - name: Commit the changes - uses: stefanzweifel/git-auto-commit-action@v4.7.2 + uses: stefanzweifel/git-auto-commit-action@v5 with: commit_message: Spotless formatting branch: ${{ github.head_ref }} @@ -54,7 +54,7 @@ jobs: version: 'Unreleased 2.x' - name: Commit the changes - uses: stefanzweifel/git-auto-commit-action@v4 + uses: stefanzweifel/git-auto-commit-action@v5 with: commit_message: "Update changelog" branch: ${{ github.head_ref }} diff --git a/CHANGELOG.md b/CHANGELOG.md index e45eb3870b522..daf5568fa7a77 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -116,6 +116,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.squareup.okio:okio` from 3.5.0 to 3.6.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637)) - Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.21.1 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000)) - Bump `aws-actions/configure-aws-credentials` from 2 to 4 ([#10504](https://github.com/opensearch-project/OpenSearch/pull/10504)) +- Bump `stefanzweifel/git-auto-commit-action` from 4 to 5 ([#11171](https://github.com/opensearch-project/OpenSearch/pull/11171)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) From c676479f6d1a0d8da8d3b23d05a31747a5322dac Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Mon, 13 Nov 2023 10:53:07 -0600 Subject: [PATCH 144/178] Mute CardinalityWithRequestBreakerIT pending fix (#10752) Relates #10154 Signed-off-by: Andrew Ross --- .../aggregations/metrics/CardinalityWithRequestBreakerIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java index 2bf5230c67b43..85c36ec0ba78d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java @@ -76,6 +76,7 @@ protected Settings featureFlagSettings() { /** * Test that searches using cardinality aggregations returns all request breaker memory.
*/ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10154") public void testRequestBreaker() throws Exception { final String requestBreaker = randomIntBetween(1, 10000) + "kb"; logger.info("--> Using request breaker setting: {}", requestBreaker); From 6c980bc9079c383569613a005937eedb5c97cacc Mon Sep 17 00:00:00 2001 From: Jay Deng Date: Mon, 13 Nov 2023 15:49:58 -0800 Subject: [PATCH 145/178] Add indexRandomForConcurrentSearch method to tests: (#11118) * IndexStatsIT.java * SearchTimeoutIT.java * NaNSortingIT.java * NestedIT.java * RangeIT.java Signed-off-by: Jay Deng --- .../org/opensearch/indices/stats/IndexStatsIT.java | 4 +++- .../java/org/opensearch/search/SearchTimeoutIT.java | 10 +++++----- .../search/aggregations/bucket/NaNSortingIT.java | 1 + .../search/aggregations/bucket/NestedIT.java | 4 ++++ .../opensearch/search/aggregations/bucket/RangeIT.java | 2 ++ 5 files changed, 15 insertions(+), 6 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java index 0967acb37d3e8..9c96d4861d426 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java @@ -169,7 +169,7 @@ private Settings.Builder settingsBuilder() { return Settings.builder().put(indexSettings()); } - public void testFieldDataStats() { + public void testFieldDataStats() throws InterruptedException { assertAcked( client().admin() .indices() @@ -182,6 +182,7 @@ public void testFieldDataStats() { client().prepareIndex("test").setId("1").setSource("field", "value1", "field2", "value1").execute().actionGet(); client().prepareIndex("test").setId("2").setSource("field", "value2", "field2", "value2").execute().actionGet(); client().admin().indices().prepareRefresh().execute().actionGet(); + indexRandomForConcurrentSearch("test"); NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); assertThat( @@ -305,6 +306,7 @@ public void testClearAllCaches() throws Exception { client().prepareIndex("test").setId("1").setSource("field", "value1").execute().actionGet(); client().prepareIndex("test").setId("2").setSource("field", "value2").execute().actionGet(); client().admin().indices().prepareRefresh().execute().actionGet(); + indexRandomForConcurrentSearch("test"); NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); assertThat( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java index 94816346e6c9e..52cc797ddd8da 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java @@ -93,6 +93,7 @@ public void testSimpleTimeout() throws Exception { client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); } refresh("test"); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setTimeout(new TimeValue(5, TimeUnit.MILLISECONDS)) @@ -104,12 +105,11 @@ public void testSimpleTimeout() throws Exception { } public void testSimpleDoesNotTimeout() throws Exception { - final int numDocs = 10; + final int 
numDocs = 9; for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); + client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); } - refresh("test"); - + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setTimeout(new TimeValue(10000, TimeUnit.SECONDS)) .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", SCRIPT_NAME, Collections.emptyMap()))) @@ -122,7 +122,7 @@ public void testSimpleDoesNotTimeout() throws Exception { public void testPartialResultsIntolerantTimeout() throws Exception { client().prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - + indexRandomForConcurrentSearch("test"); OpenSearchException ex = expectThrows( OpenSearchException.class, () -> client().prepareSearch("test") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java index 1ef2c0e8db8c7..6289cd5e36151 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java @@ -172,6 +172,7 @@ public void setupSuiteScopeCluster() throws Exception { client().prepareIndex("idx").setSource(source.endObject()).get(); } refresh(); + indexRandomForMultipleSlices("idx"); ensureSearchable(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java index b3009ffcf4f45..7af2ac218800d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java @@ -224,6 +224,7 @@ public void setupSuiteScopeCluster() throws Exception { ) ); indexRandom(true, builders); + indexRandomForMultipleSlices("idx"); ensureSearchable(); } @@ -354,6 +355,7 @@ public void testNestedAsSubAggregation() throws Exception { } public void testNestNestedAggs() throws Exception { + indexRandomForConcurrentSearch("idx_nested_nested_aggs"); SearchResponse response = client().prepareSearch("idx_nested_nested_aggs") .addAggregation( nested("level1", "nested1").subAggregation( @@ -607,6 +609,7 @@ public void testNestedSameDocIdProcessedMultipleTime() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("idx4"); SearchResponse response = client().prepareSearch("idx4") .addAggregation( @@ -782,6 +785,7 @@ public void testFilterAggInsideNestedAgg() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("classes"); SearchResponse response = client().prepareSearch("classes") .addAggregation( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java index 64ab6f1382ac3..5812b7796c33e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java @@ -184,6 +184,7 @@ public void setupSuiteScopeCluster() throws Exception { 
builders.add(client().prepareIndex("new_index").setSource(Collections.emptyMap())); indexRandom(true, builders); + indexRandomForMultipleSlices("idx", "old_index", "new_index"); ensureSearchable(); } @@ -917,6 +918,7 @@ public void testOverlappingRanges() throws Exception { } public void testEmptyAggregation() throws Exception { + indexRandomForConcurrentSearch("empty_bucket_idx"); SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) .addAggregation( From 5143198f43f05e5d1692624adc3733ac19b90004 Mon Sep 17 00:00:00 2001 From: Chaitanya Gohel <104654647+gashutos@users.noreply.github.com> Date: Tue, 14 Nov 2023 20:37:22 +0530 Subject: [PATCH 146/178] Enabling sort optimization back for half_float with custom comparators (#11024) * Enabling sort optimizatin back for half_float with custom comparators Signed-off-by: Chaitanya Gohel * Fixing tests Signed-off-by: Chaitanya Gohel * Adding test for Indecx sort half_float Signed-off-by: Chaitanya Gohel * Making indexFieldData provate in FloatValuesComparatorSource Signed-off-by: Chaitanya Gohel * Update server/src/main/java/org/opensearch/index/search/comparators/HalfFloatComparator.java Co-authored-by: Andriy Redko Signed-off-by: Chaitanya Gohel <104654647+gashutos@users.noreply.github.com> * Adding missing value instead null Signed-off-by: Chaitanya Gohel * Adding more tests for desc order sort Signed-off-by: Chaitanya Gohel * Update rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml Co-authored-by: Prabhakar Sithanandam Signed-off-by: Chaitanya Gohel <104654647+gashutos@users.noreply.github.com> * Adding tests in case missing values are competitive Signed-off-by: Chaitanya Gohel * chanheing newly added test supported version 3.0.0 Signed-off-by: Chaitanya Gohel * Assing missing float tests Signed-off-by: Chaitanya Gohel * Remove missing value change to be part of another PR Signed-off-by: Chaitanya Gohel --------- Signed-off-by: Chaitanya Gohel Signed-off-by: Chaitanya Gohel <104654647+gashutos@users.noreply.github.com> Co-authored-by: Andriy Redko Co-authored-by: Prabhakar Sithanandam --- CHANGELOG.md | 1 + .../test/indices.sort/10_basic.yml | 20 +++ .../test/search/260_sort_mixed.yml | 85 ++++++++++++ .../test/search/90_search_after.yml | 127 ++++++++++++++++++ .../opensearch/search/sort/FieldSortIT.java | 60 +++++++++ .../fielddata/IndexNumericFieldData.java | 5 +- .../HalfFloatValuesComparatorSource.java | 75 +++++++++++ .../comparators/HalfFloatComparator.java | 111 +++++++++++++++ 8 files changed, 483 insertions(+), 1 deletion(-) create mode 100644 server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/HalfFloatValuesComparatorSource.java create mode 100644 server/src/main/java/org/opensearch/index/search/comparators/HalfFloatComparator.java diff --git a/CHANGELOG.md b/CHANGELOG.md index daf5568fa7a77..2165fcb4700fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [AdmissionControl] Added changes for AdmissionControl Interceptor and AdmissionControlService for RateLimiting ([#9286](https://github.com/opensearch-project/OpenSearch/pull/9286)) - GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) - [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853)) +- Add back half_float BKD 
based sort query optimization ([#11024](https://github.com/opensearch-project/OpenSearch/pull/11024)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml index b9089689b0cf1..3b7ea15164e9f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml @@ -156,3 +156,23 @@ query: {"range": { "rank": { "from": 0 } } } track_total_hits: false size: 3 + +--- +"Index Sort half float": + - do: + catch: bad_request + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + index.sort.field: rank + mappings: + properties: + rank: + type: half_float + + # This should failed with 400 as half_float is not supported for index sort + - match: { status: 400 } + - match: { error.type: illegal_argument_exception } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml index ba2b18eb3b6d0..a04dc308b2a06 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml @@ -20,6 +20,7 @@ properties: counter: type: double + - do: bulk: refresh: true @@ -119,3 +120,87 @@ - match: { status: 400 } - match: { error.type: search_phase_execution_exception } - match: { error.caused_by.reason: "Can't do sort across indices, as a field has [unsigned_long] type in one index, and different type in another index!" } + +--- +"search across indices with mixed long and double and float numeric types": + - skip: + version: " - 2.10.99" + reason: half float was broken before 2.11 + + - do: + indices.create: + index: test_1 + body: + mappings: + properties: + counter: + type: long + + - do: + indices.create: + index: test_2 + body: + mappings: + properties: + counter: + type: double + + - do: + indices.create: + index: test_3 + body: + mappings: + properties: + counter: + type: half_float + + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + - counter: 223372036854775800 + - index: + _index: test_2 + - counter: 1223372036854775800.23 + - index: + _index: test_2 + - counter: 184.4 + - index: + _index: test_3 + - counter: 187.4 + - index: + _index: test_3 + - counter: 194.4 + + - do: + search: + index: test_* + rest_total_hits_as_int: true + body: + sort: [{ counter: desc }] + - match: { hits.total: 5 } + - length: { hits.hits: 5 } + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.0._source.counter: 1223372036854775800.23 } + - match: { hits.hits.0.sort.0: 1223372036854775800.23 } + - match: { hits.hits.1._index: test_1 } + - match: { hits.hits.1._source.counter: 223372036854775800 } + - match: { hits.hits.1.sort.0: 223372036854775800 } + - match: { hits.hits.2._index: test_3 } + - match: { hits.hits.2._source.counter: 194.4 } + + - do: + search: + index: test_* + rest_total_hits_as_int: true + body: + sort: [{ counter: asc }] + - match: { hits.total: 5 } + - length: { hits.hits: 5 } + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.0._source.counter: 184.4 } + - match: { hits.hits.0.sort.0: 184.4 } + - match: { hits.hits.1._index: test_3 } + - match: { hits.hits.1._source.counter: 187.4 } diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml index 55e1566656faf..1563daba9de6d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml @@ -320,3 +320,130 @@ - length: { hits.hits: 1 } - match: { hits.hits.0._index: test } - match: { hits.hits.0._source.population: null } + +--- +"half float": + - skip: + version: " - 2.10.99" + reason: half_float was broken for 2.10 and earlier + + - do: + indices.create: + index: test + body: + mappings: + properties: + population: + type: half_float + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"population": 184.4} + {"index":{}} + {"population": 194.4} + {"index":{}} + {"population": 144.4} + {"index":{}} + {"population": 174.4} + {"index":{}} + {"population": 164.4} + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + size: 3 + sort: [ { population: desc } ] + - match: { hits.total: 5 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: 194.4 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.population: 184.4 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.population: 174.4 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + size: 3 + sort: [ { population: asc } ] + - match: { hits.total: 5 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: 144.4 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.population: 164.4 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.population: 174.4 } + + # search_after with the asc sort + - do: + search: + index: test + rest_total_hits_as_int: true + body: + size: 1 + sort: [ { population: asc } ] + search_after: [ 184.375 ] # this is rounded sort value in sort result + - match: { hits.total: 5 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: 194.4 } + + # search_after with the desc sort + - do: + search: + index: test + rest_total_hits_as_int: true + body: + size: 1 + sort: [ { population: desc } ] + search_after: [ 164.375 ] # this is rounded sort value in sort result + - match: { hits.total: 5 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: 144.4 } + + # search_after with the asc sort with missing + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"population": null} + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 5 + "sort": [ { "population": { "order": "asc", "missing": "_last" } } ] + search_after: [ 200 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 6 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: null } + + # search_after with the desc sort with missing + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 5 + "sort": [ { "population": { "order": "desc", "missing": "_last" } } ] + search_after: [ 100 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 6 } + - length: { hits.hits: 1 } + - match: { 
hits.hits.0._index: test } + - match: { hits.hits.0._source.population: null } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index bee242b933dfd..d4980a64a3977 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -605,6 +605,9 @@ public void testSimpleSorts() throws Exception { .startObject("float_value") .field("type", "float") .endObject() + .startObject("half_float_value") + .field("type", "half_float") + .endObject() .startObject("double_value") .field("type", "double") .endObject() @@ -628,6 +631,7 @@ public void testSimpleSorts() throws Exception { .field("long_value", i) .field("unsigned_long_value", UNSIGNED_LONG_BASE.add(BigInteger.valueOf(10000 * i))) .field("float_value", 0.1 * i) + .field("half_float_value", 0.1 * i) .field("double_value", 0.1 * i) .endObject() ); @@ -794,6 +798,28 @@ public void testSimpleSorts() throws Exception { assertThat(searchResponse.toString(), not(containsString("error"))); + // HALF_FLOAT + size = 1 + random.nextInt(10); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("half_float_value", SortOrder.ASC).get(); + + assertHitCount(searchResponse, 10L); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + size = 1 + random.nextInt(10); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("half_float_value", SortOrder.DESC).get(); + + assertHitCount(searchResponse, 10); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + // DOUBLE size = 1 + random.nextInt(10); searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.ASC).get(); @@ -1330,6 +1356,9 @@ public void testSortMVField() throws Exception { .startObject("float_values") .field("type", "float") .endObject() + .startObject("half_float_values") + .field("type", "float") + .endObject() .startObject("double_values") .field("type", "double") .endObject() @@ -1351,6 +1380,7 @@ public void testSortMVField() throws Exception { .array("short_values", 1, 5, 10, 8) .array("byte_values", 1, 5, 10, 8) .array("float_values", 1f, 5f, 10f, 8f) + .array("half_float_values", 1f, 5f, 10f, 8f) .array("double_values", 1d, 5d, 10d, 8d) .array("string_values", "01", "05", "10", "08") .endObject() @@ -1365,6 +1395,7 @@ public void testSortMVField() throws Exception { .array("short_values", 11, 15, 20, 7) .array("byte_values", 11, 15, 20, 7) .array("float_values", 11f, 15f, 20f, 7f) + .array("half_float_values", 11f, 15f, 20f, 7f) .array("double_values", 11d, 15d, 20d, 7d) .array("string_values", "11", "15", "20", "07") .endObject() @@ -1379,6 +1410,7 @@ public void testSortMVField() throws Exception { .array("short_values", 2, 1, 3, -4) .array("byte_values", 2, 1, 3, -4) .array("float_values", 2f, 1f, 3f, -4f) + .array("half_float_values", 2f, 1f, 3f, -4f) .array("double_values", 2d, 1d, 3d, 
-4d) .array("string_values", "02", "01", "03", "!4") .endObject() @@ -1585,6 +1617,34 @@ public void testSortMVField() throws Exception { assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(3f)); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("half_float_values", SortOrder.ASC).get(); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(-4f)); + + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(1f)); + + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(7f)); + + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("half_float_values", SortOrder.DESC).get(); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(20f)); + + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(10f)); + + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(3f)); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.ASC).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); diff --git a/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java index 6fc074fe0de95..b0ff944d014de 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java @@ -42,6 +42,7 @@ import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource; import org.opensearch.index.fielddata.fieldcomparator.FloatValuesComparatorSource; +import org.opensearch.index.fielddata.fieldcomparator.HalfFloatValuesComparatorSource; import org.opensearch.index.fielddata.fieldcomparator.IntValuesComparatorSource; import org.opensearch.index.fielddata.fieldcomparator.LongValuesComparatorSource; import org.opensearch.index.fielddata.fieldcomparator.UnsignedLongValuesComparatorSource; @@ -220,6 +221,8 @@ private XFieldComparatorSource comparatorSource( final XFieldComparatorSource source; switch (targetNumericType) { case HALF_FLOAT: + source = new HalfFloatValuesComparatorSource(this, missingValue, sortMode, nested); + break; case FLOAT: source = new FloatValuesComparatorSource(this, 
missingValue, sortMode, nested); break; @@ -242,7 +245,7 @@ private XFieldComparatorSource comparatorSource( assert !targetNumericType.isFloatingPoint(); source = new IntValuesComparatorSource(this, missingValue, sortMode, nested); } - if (targetNumericType != getNumericType() || getNumericType() == NumericType.HALF_FLOAT) { + if (targetNumericType != getNumericType()) { source.disableSkipping(); // disable skipping logic for cast of sort field } return source; diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/HalfFloatValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/HalfFloatValuesComparatorSource.java new file mode 100644 index 0000000000000..7e3936be1d8a5 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/HalfFloatValuesComparatorSource.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.fielddata.fieldcomparator; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.FieldComparator; +import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.util.BitSet; +import org.opensearch.index.fielddata.FieldData; +import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.fielddata.NumericDoubleValues; +import org.opensearch.index.fielddata.SortedNumericDoubleValues; +import org.opensearch.index.search.comparators.HalfFloatComparator; +import org.opensearch.search.MultiValueMode; + +import java.io.IOException; + +/** + * Comparator source for half_float values. 
+ * + * @opensearch.internal + */ +public class HalfFloatValuesComparatorSource extends FloatValuesComparatorSource { + private final IndexNumericFieldData indexFieldData; + + public HalfFloatValuesComparatorSource( + IndexNumericFieldData indexFieldData, + Object missingValue, + MultiValueMode sortMode, + Nested nested + ) { + super(indexFieldData, missingValue, sortMode, nested); + this.indexFieldData = indexFieldData; + } + + @Override + public FieldComparator<?> newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); + + final float fMissingValue = (Float) missingObject(missingValue, reversed); + // NOTE: it's important to pass null as a missing value in the constructor so that + // the comparator doesn't check docsWithField since we replace missing values in select() + return new HalfFloatComparator(numHits, fieldname, null, reversed, enableSkipping && this.enableSkipping) { + @Override + public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { + return new HalfFloatLeafComparator(context) { + @Override + protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException { + return HalfFloatValuesComparatorSource.this.getNumericDocValues(context, fMissingValue).getRawFloatValues(); + } + }; + } + }; + } + + private NumericDoubleValues getNumericDocValues(LeafReaderContext context, float missingValue) throws IOException { + final SortedNumericDoubleValues values = indexFieldData.load(context).getDoubleValues(); + if (nested == null) { + return FieldData.replaceMissing(sortMode.select(values), missingValue); + } else { + final BitSet rootDocs = nested.rootDocs(context); + final DocIdSetIterator innerDocs = nested.innerDocs(context); + final int maxChildren = nested.getNestedSort() != null ? nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE; + return sortMode.select(values, missingValue, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/search/comparators/HalfFloatComparator.java b/server/src/main/java/org/opensearch/index/search/comparators/HalfFloatComparator.java new file mode 100644 index 0000000000000..6244fa647b042 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/search/comparators/HalfFloatComparator.java @@ -0,0 +1,111 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.search.comparators; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.sandbox.document.HalfFloatPoint; +import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.comparators.NumericComparator; + +import java.io.IOException; + +/** + * The comparator for half_float numeric type. + * Comparator based on {@link Float#compare} for {@code numHits}. This comparator provides a + * skipping functionality – an iterator that can skip over non-competitive documents. + */ +public class HalfFloatComparator extends NumericComparator<Float> { + private final float[] values; + protected float topValue; + protected float bottom; + + public HalfFloatComparator(int numHits, String field, Float missingValue, boolean reverse, boolean enableSkipping) { + super(field, missingValue != null ?
missingValue : 0.0f, reverse, enableSkipping, HalfFloatPoint.BYTES); + values = new float[numHits]; + } + + @Override + public int compare(int slot1, int slot2) { + return Float.compare(values[slot1], values[slot2]); + } + + @Override + public void setTopValue(Float value) { + super.setTopValue(value); + topValue = value; + } + + @Override + public Float value(int slot) { + return Float.valueOf(values[slot]); + } + + @Override + public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { + return new HalfFloatLeafComparator(context); + } + + /** Leaf comparator for {@link HalfFloatComparator} that provides skipping functionality */ + public class HalfFloatLeafComparator extends NumericLeafComparator { + + public HalfFloatLeafComparator(LeafReaderContext context) throws IOException { + super(context); + } + + private float getValueForDoc(int doc) throws IOException { + if (docValues.advanceExact(doc)) { + return Float.intBitsToFloat((int) docValues.longValue()); + } else { + return missingValue; + } + } + + @Override + public void setBottom(int slot) throws IOException { + bottom = values[slot]; + super.setBottom(slot); + } + + @Override + public int compareBottom(int doc) throws IOException { + return Float.compare(bottom, getValueForDoc(doc)); + } + + @Override + public int compareTop(int doc) throws IOException { + return Float.compare(topValue, getValueForDoc(doc)); + } + + @Override + public void copy(int slot, int doc) throws IOException { + values[slot] = getValueForDoc(doc); + super.copy(slot, doc); + } + + @Override + protected int compareMissingValueWithBottomValue() { + return Float.compare(missingValue, bottom); + } + + @Override + protected int compareMissingValueWithTopValue() { + return Float.compare(missingValue, topValue); + } + + @Override + protected void encodeBottom(byte[] packedValue) { + HalfFloatPoint.encodeDimension(bottom, packedValue, 0); + } + + @Override + protected void encodeTop(byte[] packedValue) { + HalfFloatPoint.encodeDimension(topValue, packedValue, 0); + } + } +} From 3f30511ff78ce3f0c379a22fcbd665b82a7fb216 Mon Sep 17 00:00:00 2001 From: Bansi Kasundra <66969140+kasundra07@users.noreply.github.com> Date: Tue, 14 Nov 2023 10:09:22 -0800 Subject: [PATCH 147/178] Fixing the tests for concurrent search (#11076) * Fixing the tests for concurrent search Signed-off-by: Bansi Kasundra * Remove changes for non-flaky tests Signed-off-by: Bansi Kasundra --------- Signed-off-by: Bansi Kasundra --- .../search/fetch/subphase/InnerHitsIT.java | 4 + .../fetch/subphase/MatchedQueriesIT.java | 20 +++-- .../search/query/SearchQueryIT.java | 73 ++++++++++++++----- 3 files changed, 74 insertions(+), 23 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java index 9b3e1337418cc..1a730c01e4890 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java @@ -897,6 +897,7 @@ public void testNestedSource() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("index1"); // the field name (comments.message) used for source filtering should be the same as when using that field for // other features (like in the query dsl or aggs) in order for consistency: @@ -973,6 +974,7 @@ public void testInnerHitsWithIgnoreUnmapped() throws Exception { 
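An aside on the comparator introduced above: half_float keeps only an 11-bit significand, and the skipping support packs values into the same 2-byte encoding used by the points index. A minimal standalone sketch using Lucene's sandbox HalfFloatPoint helpers (the same class encodeBottom/encodeTop rely on); the class name and printed values are illustrative:

    import org.apache.lucene.sandbox.document.HalfFloatPoint;

    public class HalfFloatRoundTrip {
        public static void main(String[] args) {
            // most decimal values round to the nearest representable half_float
            float original = 0.1f;
            short sortable = HalfFloatPoint.halfFloatToSortableShort(original);
            // prints ~0.09997559, not 0.1
            System.out.println(HalfFloatPoint.sortableShortToHalfFloat(sortable));

            // the 2-byte dimension encoding used by encodeBottom/encodeTop
            byte[] packed = new byte[HalfFloatPoint.BYTES]; // BYTES == 2
            HalfFloatPoint.encodeDimension(original, packed, 0);
            System.out.println(HalfFloatPoint.decodeDimension(packed, 0));
        }
    }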
client().prepareIndex("index1").setId("1").setSource("nested_type", Collections.singletonMap("key", "value")).get(); client().prepareIndex("index2").setId("3").setSource("key", "value").get(); refresh(); + indexRandomForConcurrentSearch("index1", "index2"); SearchResponse response = client().prepareSearch("index1", "index2") .setQuery( @@ -1002,6 +1004,7 @@ public void testUseMaxDocInsteadOfSize() throws Exception { .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("index2"); QueryBuilder query = nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg).innerHit( new InnerHitBuilder().setSize(ArrayUtil.MAX_ARRAY_LENGTH - 1) ); @@ -1019,6 +1022,7 @@ public void testTooHighResultWindow() throws Exception { ) .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("index2"); SearchResponse response = client().prepareSearch("index2") .setQuery( nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg).innerHit( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java index 23b5d0cab0697..83cedb8c20e1d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java @@ -91,6 +91,7 @@ public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { client().prepareIndex("test").setId("2").setSource("name", "test2", "number", 2).get(); client().prepareIndex("test").setId("3").setSource("name", "test3", "number", 3).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery( @@ -141,6 +142,7 @@ public void testSimpleMatchedQueryFromTopLevelFilter() throws Exception { client().prepareIndex("test").setId("2").setSource("name", "test").get(); client().prepareIndex("test").setId("3").setSource("name", "test").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -192,6 +194,7 @@ public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Ex client().prepareIndex("test").setId("2").setSource("name", "test", "title", "title2").get(); client().prepareIndex("test").setId("3").setSource("name", "test", "title", "title3").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(boolQuery().must(matchAllQuery()).filter(termsQuery("title", "title1", "title2", "title3").queryName("title"))) @@ -224,12 +227,13 @@ public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Ex } } - public void testRegExpQuerySupportsName() { + public void testRegExpQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.regexpQuery("title", "title1").queryName("regex")) @@ -246,12 +250,13 @@ public void testRegExpQuerySupportsName() { } } - public void testPrefixQuerySupportsName() { + public void testPrefixQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", 
"title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.prefixQuery("title", "title").queryName("prefix")) @@ -268,12 +273,13 @@ public void testPrefixQuerySupportsName() { } } - public void testFuzzyQuerySupportsName() { + public void testFuzzyQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.fuzzyQuery("title", "titel1").queryName("fuzzy")) @@ -290,12 +296,13 @@ public void testFuzzyQuerySupportsName() { } } - public void testWildcardQuerySupportsName() { + public void testWildcardQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.wildcardQuery("title", "titl*").queryName("wildcard")) @@ -312,12 +319,13 @@ public void testWildcardQuerySupportsName() { } } - public void testSpanFirstQuerySupportsName() { + public void testSpanFirstQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1 title2").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.spanFirstQuery(QueryBuilders.spanTermQuery("title", "title1"), 10).queryName("span")) @@ -344,6 +352,7 @@ public void testMatchedWithShould() throws Exception { client().prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get(); client().prepareIndex("test").setId("2").setSource("content", "consectetur adipisicing elit").get(); refresh(); + indexRandomForConcurrentSearch("test"); // Execute search at least two times to load it in cache int iter = scaledRandomIntBetween(2, 10); @@ -378,6 +387,7 @@ public void testMatchedWithWrapperQuery() throws Exception { client().prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get(); refresh(); + indexRandomForConcurrentSearch("test"); MatchQueryBuilder matchQueryBuilder = matchQuery("content", "amet").queryName("abc"); BytesReference matchBytes = XContentHelper.toXContent(matchQueryBuilder, MediaTypeRegistry.JSON, false); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index 53bded1fc493c..d2bca41760ff6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -465,6 +465,7 @@ public void testQueryStringAnalyzedWildcard() throws Exception { client().prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("value*")).get(); assertHitCount(searchResponse, 1L); @@ -482,11 +483,12 @@ public void testQueryStringAnalyzedWildcard() throws Exception { assertHitCount(searchResponse, 1L); } - public void testLowercaseExpandedTerms() { + public void 
testLowercaseExpandedTerms() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("VALUE_3~1")).get(); assertHitCount(searchResponse, 1L); @@ -499,7 +501,7 @@ public void testLowercaseExpandedTerms() { } // Issue #3540 - public void testDateRangeInQueryString() { + public void testDateRangeInQueryString() throws InterruptedException { // the mapping needs to be provided upfront otherwise we are not sure how many failures we get back // as with dynamic mappings some shards might be lacking behind and parse a different query assertAcked(prepareCreate("test").setMapping("past", "type=date", "future", "type=date")); @@ -510,6 +512,7 @@ public void testDateRangeInQueryString() { client().prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("past:[now-2M/d TO now/d]")).get(); assertHitCount(searchResponse, 1L); @@ -525,7 +528,7 @@ public void testDateRangeInQueryString() { } // Issue #7880 - public void testDateRangeInQueryStringWithTimeZone_7880() { + public void testDateRangeInQueryStringWithTimeZone_7880() throws InterruptedException { // the mapping needs to be provided upfront otherwise we are not sure how many failures we get back // as with dynamic mappings some shards might be lacking behind and parse a different query assertAcked(prepareCreate("test").setMapping("past", "type=date")); @@ -536,6 +539,7 @@ public void testDateRangeInQueryStringWithTimeZone_7880() { client().prepareIndex("test").setId("1").setSource("past", now).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(queryStringQuery("past:[now-1m/m TO now+1m/m]").timeZone(timeZone.getId())) .get(); @@ -543,7 +547,7 @@ public void testDateRangeInQueryStringWithTimeZone_7880() { } // Issue #10477 - public void testDateRangeInQueryStringWithTimeZone_10477() { + public void testDateRangeInQueryStringWithTimeZone_10477() throws InterruptedException { // the mapping needs to be provided upfront otherwise we are not sure how many failures we get back // as with dynamic mappings some shards might be lacking behind and parse a different query assertAcked(prepareCreate("test").setMapping("past", "type=date")); @@ -552,6 +556,7 @@ public void testDateRangeInQueryStringWithTimeZone_10477() { client().prepareIndex("test").setId("2").setSource("past", "2015-04-06T00:00:00+0000").get(); refresh(); + indexRandomForConcurrentSearch("test"); // Timezone set with dates SearchResponse searchResponse = client().prepareSearch() .setQuery(queryStringQuery("past:[2015-04-06T00:00:00+0200 TO 2015-04-06T23:00:00+0200]")) @@ -725,6 +730,7 @@ public void testPassQueryOrFilterAsJSONString() throws Exception { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", "value1_1", "field2", "value2_1").setRefreshPolicy(IMMEDIATE).get(); + indexRandomForConcurrentSearch("test"); WrapperQueryBuilder wrapper = new WrapperQueryBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }"); assertHitCount(client().prepareSearch().setQuery(wrapper).get(), 1L); @@ -741,6 +747,7 @@ public void testFiltersWithCustomCacheKey() throws Exception { 
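The date-range cases above lean on query_string date math, so the semantics are worth restating: now-1m/m means "one minute ago, rounded down to the minute", and the time_zone parameter affects rounding and date literals but never the now anchor, which is always UTC. A sketch following the tests' "past" field:

    public void exampleDateMathWithTimeZone() {
        SearchResponse response = client().prepareSearch("test")
            .setQuery(queryStringQuery("past:[now-1m/m TO now+1m/m]").timeZone("+01:00"))
            .get();
        assertHitCount(response, 1L); // assumes one doc indexed with past = now
    }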
client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))).get(); assertHitCount(searchResponse, 1L); @@ -782,6 +789,7 @@ public void testMatchQueryFuzzy() throws Exception { client().prepareIndex("test").setId("1").setSource("text", "Unit"), client().prepareIndex("test").setId("2").setSource("text", "Unity") ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness(Fuzziness.ZERO)).get(); assertHitCount(searchResponse, 0L); @@ -913,11 +921,12 @@ public void testMultiMatchQueryZeroTermsQuery() { assertHitCount(searchResponse, 2L); } - public void testMultiMatchQueryMinShouldMatch() { + public void testMultiMatchQueryMinShouldMatch() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", "value2", "value3" }).get(); client().prepareIndex("test").setId("2").setSource("field2", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); MultiMatchQueryBuilder multiMatchQuery = multiMatchQuery("value1 value2 foo", "field1", "field2"); @@ -959,12 +968,13 @@ public void testMultiMatchQueryMinShouldMatch() { assertHitCount(searchResponse, 0L); } - public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws IOException { + public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws IOException, InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", "value2", "value3" }).get(); client().prepareIndex("test").setId("2").setSource("field2", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); BoolQueryBuilder boolQuery = boolQuery().must(termQuery("field1", "value1")) .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)); SearchResponse searchResponse = client().prepareSearch().setQuery(boolQuery).get(); @@ -991,12 +1001,13 @@ public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws assertHitCount(searchResponse, 0L); } - public void testFuzzyQueryString() { + public void testFuzzyQueryString() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("str", "foobar", "date", "2012-02-01", "num", 12).get(); client().prepareIndex("test").setId("2").setSource("str", "fred", "date", "2012-02-05", "num", 20).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("str:foobaz~1")).get(); assertNoFailures(searchResponse); assertHitCount(searchResponse, 1L); @@ -1015,6 +1026,7 @@ public void testQuotedQueryStringWithBoost() throws InterruptedException { client().prepareIndex("test").setId("2").setSource("important", "nothing important", "less_important", "phrase match") ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(queryStringQuery("\"phrase match\"").field("important", boost).field("less_important")) .get(); @@ -1027,11 +1039,12 @@ public void testQuotedQueryStringWithBoost() throws InterruptedException { ); } - public void testSpecialRangeSyntaxInQueryString() { + public void testSpecialRangeSyntaxInQueryString() throws 
InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("str", "foobar", "date", "2012-02-01", "num", 12).get(); client().prepareIndex("test").setId("2").setSource("str", "fred", "date", "2012-02-05", "num", 20).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("num:>19")).get(); assertHitCount(searchResponse, 1L); @@ -1137,6 +1150,7 @@ public void testTermsQuery() throws Exception { public void testTermsLookupFilter() throws Exception { assertAcked(prepareCreate("lookup").setMapping("terms", "type=text", "other", "type=text")); + indexRandomForConcurrentSearch("lookup"); assertAcked( prepareCreate("lookup2").setMapping( jsonBuilder().startObject() @@ -1152,8 +1166,11 @@ public void testTermsLookupFilter() throws Exception { .endObject() ) ); + indexRandomForConcurrentSearch("lookup2"); assertAcked(prepareCreate("lookup3").setMapping("_source", "enabled=false", "terms", "type=text")); + indexRandomForConcurrentSearch("lookup3"); assertAcked(prepareCreate("test").setMapping("term", "type=text")); + indexRandomForConcurrentSearch("test"); indexRandom( true, @@ -1279,6 +1296,7 @@ public void testBasicQueryById() throws Exception { client().prepareIndex("test").setId("3").setSource("field1", "value3").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "2")).get(); assertHitCount(searchResponse, 2L); assertThat(searchResponse.getHits().getHits().length, equalTo(2)); @@ -1333,6 +1351,7 @@ public void testNumericTermsAndRanges() throws Exception { .setSource("num_byte", 17, "num_short", 17, "num_integer", 17, "num_long", 17, "num_float", 17, "num_double", 17) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse; logger.info("--> term query on 1"); @@ -1439,6 +1458,7 @@ public void testNumericRangeFilter_2826() throws Exception { client().prepareIndex("test").setId("3").setSource("field1", "test2", "num_long", 3).get(); client().prepareIndex("test").setId("4").setSource("field1", "test2", "num_long", 4).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setPostFilter(boolQuery().should(rangeQuery("num_long").from(1).to(2)).should(rangeQuery("num_long").from(3).to(4))) @@ -1535,7 +1555,7 @@ public void testSimpleSpan() throws IOException, ExecutionException, Interrupted assertHitCount(searchResponse, 3L); } - public void testSpanMultiTermQuery() throws IOException { + public void testSpanMultiTermQuery() throws IOException, InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("description", "foo other anything bar", "count", 1).get(); @@ -1543,6 +1563,7 @@ public void testSpanMultiTermQuery() throws IOException { client().prepareIndex("test").setId("3").setSource("description", "foo other", "count", 3).get(); client().prepareIndex("test").setId("4").setSource("description", "fop", "count", 4).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(spanOrQuery(spanMultiTermQueryBuilder(fuzzyQuery("description", "fop")))) @@ -1574,6 +1595,7 @@ public void testSpanNot() throws IOException, ExecutionException, InterruptedExc client().prepareIndex("test").setId("1").setSource("description", "the quick brown fox jumped over the lazy dog").get(); 
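testTermsLookupFilter above only shows the index setup; the query side is not part of this hunk. A hedged sketch of the shape such a lookup takes, matching the "lookup" and "test" mappings created above (the exact builder names are an assumption):

    public void exampleTermsLookup() {
        // pulls the array at path "terms" from document lookup/1 and uses it
        // as the term set for a terms query on field "term"
        SearchResponse response = client().prepareSearch("test")
            .setQuery(termsLookupQuery("term", new TermsLookup("lookup", "1", "terms")))
            .get();
        assertNoFailures(response);
    }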
client().prepareIndex("test").setId("2").setSource("description", "the quick black fox leaped over the sleeping dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery( @@ -1612,7 +1634,7 @@ public void testSpanNot() throws IOException, ExecutionException, InterruptedExc assertHitCount(searchResponse, 1L); } - public void testSimpleDFSQuery() throws IOException { + public void testSimpleDFSQuery() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -1657,6 +1679,7 @@ public void testSimpleDFSQuery() throws IOException { .setSource("online", true, "ts", System.currentTimeMillis() - 123123, "type", "bs") .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) @@ -1679,8 +1702,9 @@ public void testSimpleDFSQuery() throws IOException { assertNoFailures(response); } - public void testMultiFieldQueryString() { + public void testMultiFieldQueryString() throws InterruptedException { client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + indexRandomForConcurrentSearch("test"); logger.info("regular"); assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("value1").field("field1").field("field2")).get(), 1); @@ -1700,11 +1724,12 @@ public void testMultiFieldQueryString() { } // see #3797 - public void testMultiMatchLenientIssue3797() { + public void testMultiMatchLenientIssue3797() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", 123, "field2", "value2").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(multiMatchQuery("value2", "field2").field("field1", 2).lenient(true)) @@ -1728,6 +1753,7 @@ public void testMinScore() throws ExecutionException, InterruptedException { client().prepareIndex("test").setId("3").setSource("score", 2.0).get(); client().prepareIndex("test").setId("4").setSource("score", 0.5).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(functionScoreQuery(ScoreFunctionBuilders.fieldValueFactorFunction("score").missing(1.0)).setMinScore(1.5f)) @@ -1737,12 +1763,13 @@ public void testMinScore() throws ExecutionException, InterruptedException { assertSecondHit(searchResponse, hasId("1")); } - public void testQueryStringWithSlopAndFields() { + public void testQueryStringWithSlopAndFields() throws InterruptedException { assertAcked(prepareCreate("test")); client().prepareIndex("test").setId("1").setSource("desc", "one two three", "type", "customer").get(); client().prepareIndex("test").setId("2").setSource("desc", "one two three", "type", "product").get(); refresh(); + indexRandomForConcurrentSearch("test"); { SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")) @@ -1809,6 +1836,7 @@ public void testRangeQueryWithTimeZone() throws Exception { .setId("4") .setSource("date", Instant.now().atZone(ZoneOffset.ofHours(1)).toInstant().toEpochMilli(), "num", 4) ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") 
.setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00").to("2014-01-01T00:59:00")) @@ -1948,6 +1976,7 @@ public void testMatchPhrasePrefixQuery() throws ExecutionException, InterruptedE public void testQueryStringParserCache() throws Exception { createIndex("test"); indexRandom(true, false, client().prepareIndex("test").setId("1").setSource("nameTokens", "xyz")); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) @@ -1978,6 +2007,7 @@ public void testRangeQueryRangeFields_24744() throws Exception { .setSource(jsonBuilder().startObject().startObject("int_range").field("gte", 10).field("lte", 20).endObject().endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); RangeQueryBuilder range = new RangeQueryBuilder("int_range").relation("intersects").from(Integer.MIN_VALUE).to(Integer.MAX_VALUE); SearchResponse searchResponse = client().prepareSearch("test").setQuery(range).get(); @@ -2013,6 +2043,7 @@ public void testNestedQueryWithFieldAlias() throws Exception { index("index", "_doc", "1", source); refresh(); + indexRandomForConcurrentSearch("index"); QueryBuilder nestedQuery = QueryBuilders.nestedQuery( "section", @@ -2041,6 +2072,7 @@ public void testFieldAliasesForMetaFields() throws Exception { IndexRequestBuilder indexRequest = client().prepareIndex("test").setId("1").setRouting("custom").setSource("field", "value"); indexRandom(true, false, indexRequest); + indexRandomForConcurrentSearch("test"); client().admin() .cluster() .prepareUpdateSettings() @@ -2073,7 +2105,7 @@ public void testFieldAliasesForMetaFields() throws Exception { /** * Test that wildcard queries on keyword fields get normalized */ - public void testWildcardQueryNormalizationOnKeywordField() { + public void testWildcardQueryNormalizationOnKeywordField() throws InterruptedException { assertAcked( prepareCreate("test").setSettings( Settings.builder() @@ -2084,6 +2116,7 @@ public void testWildcardQueryNormalizationOnKeywordField() { ); client().prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get(); refresh(); + indexRandomForConcurrentSearch("test"); { WildcardQueryBuilder wildCardQuery = wildcardQuery("field1", "Bb*"); @@ -2099,7 +2132,7 @@ public void testWildcardQueryNormalizationOnKeywordField() { /** * Test that wildcard queries on text fields get normalized */ - public void testWildcardQueryNormalizationOnTextField() { + public void testWildcardQueryNormalizationOnTextField() throws InterruptedException { assertAcked( prepareCreate("test").setSettings( Settings.builder() @@ -2111,6 +2144,7 @@ public void testWildcardQueryNormalizationOnTextField() { ); client().prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get(); refresh(); + indexRandomForConcurrentSearch("test"); { // test default case insensitivity: false @@ -2130,10 +2164,11 @@ public void testWildcardQueryNormalizationOnTextField() { } /** tests wildcard case sensitivity */ - public void testWildcardCaseSensitivity() { + public void testWildcardCaseSensitivity() throws InterruptedException { assertAcked(prepareCreate("test").setMapping("field", "type=text")); client().prepareIndex("test").setId("1").setSource("field", "lowercase text").get(); refresh(); + indexRandomForConcurrentSearch("test"); // test case sensitive SearchResponse response = client().prepareSearch("test").setQuery(wildcardQuery("field", "Text").caseInsensitive(false)).get(); @@ -2151,7 +2186,7 @@ public void testWildcardCaseSensitivity() 
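The wildcard-normalization tests above rely on an index-level normalizer (or analyzer) lowercasing the pattern before matching, while case sensitivity can also be controlled per query. A sketch; the first statement assumes the keyword-plus-lowercase-normalizer setup of the neighboring test, the second the plain text-field setup:

    public void exampleWildcardNormalization() {
        // with a lowercase normalizer on the keyword field, the pattern "Bb*"
        // is normalized to "bb*" and matches the indexed value "Bbb Aaa"
        assertHitCount(client().prepareSearch().setQuery(wildcardQuery("field1", "Bb*")).get(), 1L);

        // without a normalizer, matching across case is opt-in per query
        assertHitCount(client().prepareSearch().setQuery(wildcardQuery("field", "Text").caseInsensitive(true)).get(), 1L);
    }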
{ * Reserved characters should be excluded when the normalization is applied for keyword fields. * See https://github.com/elastic/elasticsearch/issues/46300 for details. */ - public void testWildcardQueryNormalizationKeywordSpecialCharacters() { + public void testWildcardQueryNormalizationKeywordSpecialCharacters() throws InterruptedException { assertAcked( prepareCreate("test").setSettings( Settings.builder() @@ -2163,6 +2198,7 @@ public void testWildcardQueryNormalizationKeywordSpecialCharacters() { ); client().prepareIndex("test").setId("1").setSource("field", "label-1").get(); refresh(); + indexRandomForConcurrentSearch("test"); WildcardQueryBuilder wildCardQuery = wildcardQuery("field", "la*"); SearchResponse searchResponse = client().prepareSearch().setQuery(wildCardQuery).get(); @@ -2213,11 +2249,12 @@ public Map> getTokenizers() { * set for fuzzy queries with "constant_score" rewrite nested inside a `span_multi` query and would cause NPEs due to an unset * {@link AttributeSource}. */ - public void testIssueFuzzyInsideSpanMulti() { + public void testIssueFuzzyInsideSpanMulti() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field", "foobarbaz").get(); ensureGreen(); refresh(); + indexRandomForConcurrentSearch("test"); BoolQueryBuilder query = boolQuery().filter(spanMultiTermQueryBuilder(fuzzyQuery("field", "foobarbiz").rewrite("constant_score"))); SearchResponse response = client().prepareSearch("test").setQuery(query).get(); From 39308bf5d962a70fe0e3103a354899ce506562e6 Mon Sep 17 00:00:00 2001 From: Ethan Emoto <70109490+e-emoto@users.noreply.github.com> Date: Tue, 14 Nov 2023 10:34:29 -0800 Subject: [PATCH 148/178] Fixed test cases for concurrent search (#11184) Updated flaky and failing test cases after changes to indexRandomForConcurrentSearch Remove unnecessary call to indexRandomForConcurrentSearch in testSearchAndRelocateConcurrently Signed-off-by: Ethan Emoto --- .../org/opensearch/search/fetch/FetchSubPhasePluginIT.java | 4 ---- .../search/functionscore/RandomScoreFunctionIT.java | 3 +++ 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java index 77ddc5bf636f0..87f2153eb800f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java @@ -100,10 +100,6 @@ protected Collection> nodePlugins() { @SuppressWarnings("unchecked") public void testPlugin() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11112", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); client().admin() .indices() .prepareCreate("test") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java index 54cfcb7e6a683..69e30fc879dd8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java @@ -135,6 +135,7 @@ public void testConsistentHitsWithSameSeed() throws Exception { } 
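For reference, the exact query shape that used to trip the unset AttributeSource NPE mentioned above, pulled together into one sketch:

    public void exampleFuzzyInsideSpanMulti() {
        // a multi-term (fuzzy) query with the "constant_score" rewrite, wrapped
        // in span_multi and used as a filter clause
        BoolQueryBuilder query = boolQuery().filter(
            spanMultiTermQueryBuilder(fuzzyQuery("field", "foobarbiz").rewrite("constant_score"))
        );
        assertHitCount(client().prepareSearch("test").setQuery(query).get(), 1L);
    }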
flush(); refresh(); + indexRandomForConcurrentSearch("test"); int outerIters = scaledRandomIntBetween(10, 20); for (int o = 0; o < outerIters; o++) { final int seed = randomInt(); @@ -299,6 +300,7 @@ public void testSeedReportedInExplain() throws Exception { index("test", "type", "1", jsonBuilder().startObject().endObject()); flush(); refresh(); + indexRandomForConcurrentSearch("test"); int seed = 12345678; @@ -318,6 +320,7 @@ public void testSeedAndNameReportedInExplain() throws Exception { index("test", "type", "1", jsonBuilder().startObject().endObject()); flush(); refresh(); + indexRandomForConcurrentSearch("test"); int seed = 12345678; From 3ff7e9710ab4c4625d7a8100aab0451e6f9c8d13 Mon Sep 17 00:00:00 2001 From: Jay Deng Date: Tue, 14 Nov 2023 11:10:26 -0800 Subject: [PATCH 149/178] Add indexRandomForConcurrentSearch for tests: (#11181) * MultiSearchIT.java * SimpleNestedIT.java * SearchPreferenceIT.java * SimpleQueryStringIT.java * ScriptQuerySearchIT.java Signed-off-by: Jay Deng --- .../search/msearch/MultiSearchIT.java | 6 ++-- .../search/nested/SimpleNestedIT.java | 28 ++++++++++++++++++- .../search/preference/SearchPreferenceIT.java | 7 +++-- .../search/query/SimpleQueryStringIT.java | 1 + .../scriptfilter/ScriptQuerySearchIT.java | 5 +++- 5 files changed, 41 insertions(+), 6 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java index bc1d2833ecbbf..b35208941d2a2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java @@ -71,12 +71,13 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); } - public void testSimpleMultiSearch() { + public void testSimpleMultiSearch() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "xxx").get(); client().prepareIndex("test").setId("2").setSource("field", "yyy").get(); refresh(); + indexRandomForConcurrentSearch("test"); MultiSearchResponse response = client().prepareMultiSearch() .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx"))) .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy"))) @@ -94,13 +95,14 @@ public void testSimpleMultiSearch() { assertFirstHit(response.getResponses()[1].getResponse(), hasId("2")); } - public void testSimpleMultiSearchMoreRequests() { + public void testSimpleMultiSearchMoreRequests() throws InterruptedException { createIndex("test"); int numDocs = randomIntBetween(0, 16); for (int i = 0; i < numDocs; i++) { client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", MediaTypeRegistry.JSON).get(); } refresh(); + indexRandomForConcurrentSearch("test"); int numSearchRequests = randomIntBetween(1, 64); MultiSearchRequest request = new MultiSearchRequest(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java index 656e7b2e366ed..8b375841c2913 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java @@ -126,6 +126,7 @@ public void 
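The _msearch responses that MultiSearchIT checks arrive positionally, and each item succeeds or fails independently of the others, so callers should check before unwrapping. In sketch form:

    public void exampleIterateMultiSearch() {
        MultiSearchResponse response = client().prepareMultiSearch()
            .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx")))
            .add(client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()))
            .get();
        for (MultiSearchResponse.Item item : response.getResponses()) {
            // each item carries either a SearchResponse or its own failure
            assertThat(item.isFailure(), equalTo(false));
            assertNoFailures(item.getResponse());
        }
    }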
testSimpleNested() throws Exception { .get(); waitForRelocation(ClusterHealthStatus.GREEN); + indexRandomForConcurrentSearch("test"); GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getSourceAsBytes(), notNullValue()); @@ -500,6 +501,10 @@ public void testExplain() throws Exception { } public void testSimpleNestedSorting() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).put("index.refresh_interval", -1)) .setMapping( @@ -569,6 +574,7 @@ public void testSimpleNestedSorting() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.matchAllQuery()) @@ -598,6 +604,10 @@ public void testSimpleNestedSorting() throws Exception { } public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).put("index.refresh_interval", -1)) .setMapping( @@ -677,6 +687,7 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchRequestBuilder searchRequestBuilder = client().prepareSearch("test") .setQuery(QueryBuilders.matchAllQuery()) @@ -729,6 +740,10 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { } public void testNestedSortWithMultiLevelFiltering() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setMapping( "{\n" @@ -865,6 +880,7 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); // access id = 1, read, max value, asc, should use grault and quxx SearchResponse searchResponse = client().prepareSearch() @@ -971,7 +987,7 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { // https://github.com/elastic/elasticsearch/issues/31554 public void testLeakingSortValues() throws Exception { assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11065", + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) ); assertAcked( @@ -1063,6 +1079,10 @@ public void testLeakingSortValues() throws Exception { } public void testSortNestedWithNestedFilter() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( 
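SimpleNestedIT sorts parent documents by values inside their nested objects, optionally filtering which nested docs contribute. A sketch of that shape using NestedSortBuilder (field names are illustrative, not taken from the test):

    public void exampleNestedSortWithFilter() {
        SearchResponse response = client().prepareSearch("test")
            .setQuery(matchAllQuery())
            .addSort(
                SortBuilders.fieldSort("nested.value")
                    // only nested docs matching the filter feed the sort value
                    .setNestedSort(new NestedSortBuilder("nested").setFilter(termQuery("nested.type", "type1")))
                    .sortMode(SortMode.MIN)
                    .order(SortOrder.ASC)
            )
            .get();
        assertNoFailures(response);
    }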
prepareCreate("test").setMapping( XContentFactory.jsonBuilder() @@ -1222,6 +1242,7 @@ public void testSortNestedWithNestedFilter() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); // Without nested filter SearchResponse searchResponse = client().prepareSearch() @@ -1460,6 +1481,10 @@ public void testSortNestedWithNestedFilter() throws Exception { // Issue #9305 public void testNestedSortingWithNestedFilterAsFilter() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -1602,6 +1627,7 @@ public void testNestedSortingWithNestedFilterAsFilter() throws Exception { .get(); assertTrue(indexResponse2.getShardInfo().getSuccessful() > 0); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .addSort(SortBuilders.fieldSort("users.first").setNestedPath("users").order(SortOrder.ASC)) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java index 97fe05f5b9747..6e40c08ed08a1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java @@ -160,12 +160,13 @@ public void testNoPreferenceRandom() throws Exception { assertThat(firstNodeId, not(equalTo(secondNodeId))); } - public void testSimplePreference() { + public void testSimplePreference() throws InterruptedException { client().admin().indices().prepareCreate("test").setSettings("{\"number_of_replicas\": 1}", MediaTypeRegistry.JSON).get(); ensureGreen(); client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); @@ -264,7 +265,7 @@ private void assertSearchOnRandomNodes(SearchRequestBuilder request) { assertThat(hitNodes.size(), greaterThan(1)); } - public void testCustomPreferenceUnaffectedByOtherShardMovements() { + public void testCustomPreferenceUnaffectedByOtherShardMovements() throws InterruptedException { /* * Custom preferences can be used to encourage searches to go to a consistent set of shard copies, meaning that other copies' data @@ -283,6 +284,7 @@ public void testCustomPreferenceUnaffectedByOtherShardMovements() { ensureGreen(); client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); final String customPreference = randomAlphaOfLength(10); @@ -302,6 +304,7 @@ public void testCustomPreferenceUnaffectedByOtherShardMovements() { prepareCreate("test2").setSettings(Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, replicasInNewIndex)) ); ensureGreen(); + indexRandomForConcurrentSearch("test2"); assertSearchesSpecificNode("test", customPreference, nodeId); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java index 017d28ef3a2a6..d8902238005da 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java @@ -437,6 +437,7 @@ public void testSimpleQueryStringOnIndexMetaField() throws Exception { client().prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("test").field("_index")).get(); assertHitCount(searchResponse, 2L); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java index 34967528f2c4f..ae00904f237a5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java @@ -161,6 +161,7 @@ public void testCustomScriptBinaryField() throws Exception { .get(); flush(); refresh(); + indexRandomForConcurrentSearch("my-index"); SearchResponse response = client().prepareSearch() .setQuery( @@ -213,6 +214,7 @@ public void testCustomScriptBoost() throws Exception { .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("running doc['num1'].value > 1"); SearchResponse response = client().prepareSearch() @@ -259,7 +261,7 @@ public void testCustomScriptBoost() throws Exception { assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); } - public void testDisallowExpensiveQueries() { + public void testDisallowExpensiveQueries() throws InterruptedException { try { assertAcked(prepareCreate("test-index").setMapping("num1", "type=double")); int docCount = 10; @@ -267,6 +269,7 @@ public void testDisallowExpensiveQueries() { client().prepareIndex("test-index").setId("" + i).setSource("num1", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); // Execute with search.allow_expensive_queries = null => default value = false => success Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > 1", Collections.emptyMap()); From 865766916ba04f6b5eeaf209a4b234b190708cca Mon Sep 17 00:00:00 2001 From: Ganesh Krishna Ramadurai Date: Tue, 14 Nov 2023 12:00:48 -0800 Subject: [PATCH 150/178] Fix tests for concurrent search (#11130) Signed-off-by: Ganesh Ramadurai Signed-off-by: Ganesh Ramadurai Co-authored-by: Ganesh Ramadurai --- .../org/opensearch/search/scroll/SearchScrollIT.java | 3 ++- .../java/org/opensearch/search/sort/FieldSortIT.java | 5 +++++ .../java/org/opensearch/search/sort/GeoDistanceIT.java | 3 ++- .../org/opensearch/search/sort/SortFromPluginIT.java | 2 ++ .../opensearch/search/source/MetadataFetchingIT.java | 10 ++++++---- .../org/opensearch/search/source/SourceFetchingIT.java | 3 ++- .../org/opensearch/search/suggest/SuggestSearchIT.java | 1 + 7 files changed, 20 insertions(+), 7 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java index 0eee136acac69..b2b6409580061 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java +++ 
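ScriptQuerySearchIT's expensive-queries case builds a script query; outside the test's mock script engine, the same shape with Painless would look like the sketch below (index and field names follow the test, the language is swapped in):

    public void exampleScriptQuery() {
        Script script = new Script(ScriptType.INLINE, "painless", "doc['num1'].value > 1", Collections.emptyMap());
        // script queries execute per document, which is why they are rejected
        // when the dynamic setting search.allow_expensive_queries is false
        SearchResponse response = client().prepareSearch("test-index").setQuery(scriptQuery(script)).get();
        assertNoFailures(response);
    }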
b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java @@ -733,7 +733,7 @@ public void testInvalidScrollKeepAlive() throws IOException { * Ensures that we always create and retain search contexts on every target shards for a scroll request * regardless whether that query can be written to match_no_docs on some target shards or not. */ - public void testScrollRewrittenToMatchNoDocs() { + public void testScrollRewrittenToMatchNoDocs() throws InterruptedException { final int numShards = randomIntBetween(3, 5); assertAcked( client().admin() @@ -746,6 +746,7 @@ public void testScrollRewrittenToMatchNoDocs() { client().prepareIndex("test").setId("2").setSource("created_date", "2020-01-02").get(); client().prepareIndex("test").setId("3").setSource("created_date", "2020-01-03").get(); client().admin().indices().prepareRefresh("test").get(); + indexRandomForConcurrentSearch("test"); SearchResponse resp = null; try { int totalHits = 0; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index d4980a64a3977..9c788110af937 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -265,6 +265,10 @@ public void testIssue6614() throws ExecutionException, InterruptedException { } public void testTrackScores() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11189", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked(client().admin().indices().prepareCreate("test").setMapping("svalue", "type=keyword").get()); ensureGreen(); index( @@ -278,6 +282,7 @@ public void testTrackScores() throws Exception { jsonBuilder().startObject().field("id", "2").field("svalue", "bbb").field("ivalue", 200).field("dvalue", 0.2).endObject() ); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java index 6886f8d67589e..ff24a1f962332 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java @@ -602,7 +602,7 @@ public void testDistanceSortingNestedFields() throws Exception { /** * Issue 3073 */ - public void testGeoDistanceFilter() throws IOException { + public void testGeoDistanceFilter() throws IOException, InterruptedException { Version version = VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); double lat = 40.720611; @@ -620,6 +620,7 @@ public void testGeoDistanceFilter() throws IOException { assertAcked(prepareCreate("locations").setSettings(settings).setMapping(mapping)); client().prepareIndex("locations").setId("1").setCreate(true).setSource(source).get(); refresh(); + indexRandomForConcurrentSearch("locations"); client().prepareGet("locations", "1").get(); SearchResponse result = client().prepareSearch("locations") diff --git 
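testGeoDistanceFilter (issue 3073) checks a radius filter around a point. A sketch of the query side; the field name, longitude, and radius below are illustrative rather than copied from the test:

    public void exampleGeoDistanceQuery() {
        SearchResponse result = client().prepareSearch("locations")
            .setQuery(
                geoDistanceQuery("location")
                    .point(40.720611, -73.990000)
                    .distance(1, DistanceUnit.KILOMETERS)
            )
            .get();
        assertHitCount(result, 1L);
    }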
a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java index 7bcded86fcaa8..76e68781c72ba 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java @@ -59,6 +59,7 @@ public void testPluginSort() throws Exception { client().prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test").addSort(new CustomSortBuilder("field", SortOrder.ASC)).get(); assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("3")); @@ -80,6 +81,7 @@ public void testPluginSortXContent() throws Exception { client().prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); + indexRandomForConcurrentSearch("test"); // builder -> json -> builder SearchResponse searchResponse = client().prepareSearch("test") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java index c98a38ea0bb97..a9c4bf841d9a1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java @@ -76,12 +76,13 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); } - public void testSimple() { + public void testSimple() throws InterruptedException { assertAcked(prepareCreate("test")); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").storedFields("_none_").setFetchSource(false).setVersion(true).get(); assertThat(response.getHits().getAt(0).getId(), nullValue()); @@ -93,12 +94,12 @@ public void testSimple() { assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); } - public void testInnerHits() { + public void testInnerHits() throws InterruptedException { assertAcked(prepareCreate("test").setMapping("nested", "type=nested")); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value", "nested", Collections.singletonMap("title", "foo")).get(); refresh(); - + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .storedFields("_none_") .setFetchSource(false) @@ -119,12 +120,13 @@ public void testInnerHits() { assertThat(hits.getAt(0).getSourceAsString(), nullValue()); } - public void testWithRouting() { + public void testWithRouting() throws InterruptedException { assertAcked(prepareCreate("test")); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value").setRouting("toto").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").storedFields("_none_").setFetchSource(false).get(); assertThat(response.getHits().getAt(0).getId(), nullValue()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java index eeef5403fe898..0e9932051141b 100644 --- 
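MetadataFetchingIT's assertions all stem from one behavior worth spelling out: storedFields("_none_") suppresses metadata fields (including _id) and setFetchSource(false) drops _source, leaving hits that carry little beyond scores and sort values. Condensed:

    public void exampleNoMetadataFetch() {
        SearchResponse response = client().prepareSearch("test")
            .storedFields("_none_")
            .setFetchSource(false)
            .get();
        assertThat(response.getHits().getAt(0).getId(), nullValue());
        assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
    }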
a/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java @@ -66,12 +66,13 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); } - public void testSourceDefaultBehavior() { + public void testSourceDefaultBehavior() throws InterruptedException { createIndex("test"); ensureGreen(); index("test", "type1", "1", "field", "value"); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").get(); assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java index 32bb0e34054bb..dd6179d3820be 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java @@ -287,6 +287,7 @@ public void testSizeOneShard() throws Exception { index("test", "type1", Integer.toString(i), "text", "abc" + i); } refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "spellchecker")).get(); assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue()); From 675dd416156a628307f7bad0a5ea8794f9d9b942 Mon Sep 17 00:00:00 2001 From: Wei Wang <93847013+weiwang118@users.noreply.github.com> Date: Tue, 14 Nov 2023 16:56:04 -0800 Subject: [PATCH 151/178] Fix tests for concurrent Search: TermsDocCountErrorIT (#11093) Signed-off-by: Wei Wang Co-authored-by: Wei Wang --- .../search/aggregations/bucket/TermsDocCountErrorIT.java | 1 + .../opensearch/test/ParameterizedOpenSearchIntegTestCase.java | 1 + 2 files changed, 2 insertions(+) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java index 3fcf4b5d533d4..b355ce6d7a8dd 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -225,6 +225,7 @@ public void setupSuiteScopeCluster() throws Exception { } indexRandom(true, builders); + indexRandomForMultipleSlices("idx"); ensureSearchable(); } diff --git a/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java index f8813a8c5afa9..edda6bf5603f7 100644 --- a/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java @@ -47,6 +47,7 @@ public void afterTests() { client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settingsToUnset).get(); } + // This method shouldn't be called in setupSuiteScopeCluster(). Only call this method inside single test. public void indexRandomForConcurrentSearch(String... 
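TermsDocCountErrorIT, whose setup is patched above, measures how far shard-local top-N truncation can skew term counts; the error bounds are only reported when requested. A sketch (the field name is illustrative):

    public void exampleDocCountError() {
        SearchResponse response = client().prepareSearch("idx")
            .addAggregation(
                terms("terms").field("string_value").size(5).shardSize(5).showTermDocCountError(true)
            )
            .get();
        Terms agg = response.getAggregations().get("terms");
        // upper bound on docs potentially missed for terms that were not returned
        assertThat(agg.getDocCountError(), greaterThanOrEqualTo(0L));
        for (Terms.Bucket bucket : agg.getBuckets()) {
            // per-bucket bound on how much the count may be underestimated
            assertThat(bucket.getDocCountError(), greaterThanOrEqualTo(0L));
        }
    }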
indices) throws InterruptedException { if (dynamicSettings.get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()).equals("true")) { indexRandomForMultipleSlices(indices); From c1866d9e70ef87d69cc6c49b3234ac5269c43a28 Mon Sep 17 00:00:00 2001 From: Chaitanya Gohel <104654647+gashutos@users.noreply.github.com> Date: Wed, 15 Nov 2023 20:49:21 +0530 Subject: [PATCH 152/178] Skipping half_float sort tests for 2.11.0 as well (#11214) Signed-off-by: Chaitanya Gohel --- .../resources/rest-api-spec/test/search/260_sort_mixed.yml | 4 ++-- .../resources/rest-api-spec/test/search/90_search_after.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml index a04dc308b2a06..7385eef051243 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml @@ -124,8 +124,8 @@ --- "search across indices with mixed long and double and float numeric types": - skip: - version: " - 2.10.99" - reason: half float was broken before 2.11 + version: " - 2.11.0" + reason: half float was broken before 2.11.1 - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml index 1563daba9de6d..41c8626afcd75 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml @@ -324,8 +324,8 @@ --- "half float": - skip: - version: " - 2.10.99" - reason: half_float was broken for 2.10 and earlier + version: " - 2.11.0" + reason: half_float was broken for 2.11.0 and earlier - do: indices.create: From 379acf30cce5fe3b38bbd806183bf06f6937450a Mon Sep 17 00:00:00 2001 From: David Z <38449481+dzane17@users.noreply.github.com> Date: Wed, 15 Nov 2023 09:19:28 -0800 Subject: [PATCH 153/178] Per request latency onPhaseEnd fix (#10934) Signed-off-by: David Zane --- CHANGELOG.md | 1 + .../org/opensearch/action/search/AbstractSearchAsyncAction.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2165fcb4700fb..99c31f00278b8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -147,6 +147,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor ([#10737](https://github.com/opensearch-project/OpenSearch/pull/10737)) - Fix SuggestSearch.testSkipDuplicates by forceing refresh when indexing its test documents ([#11068](https://github.com/opensearch-project/OpenSearch/pull/11068)) - Adding version condition while adding geoshape doc values to the index, to ensure backward compatibility.([#11095](https://github.com/opensearch-project/OpenSearch/pull/11095)) +- Fix per request latency last phase not tracked ([#10934](https://github.com/opensearch-project/OpenSearch/pull/10934)) ### Security diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 14f57218ae1dc..d27bc931e827b 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ 
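The comment added to ParameterizedOpenSearchIntegTestCase above encodes a usage rule worth restating: indexRandomForConcurrentSearch consults the per-test dynamic settings, so it belongs inside individual test methods, while suite-scope setup should call indexRandomForMultipleSlices directly, as the TermsDocCountErrorIT change does. In sketch form:

    public void testSomething() throws InterruptedException {
        client().prepareIndex("test").setId("1").setSource("field", "value").get();
        refresh();
        // no-op unless the suite parameter enabled concurrent segment search
        indexRandomForConcurrentSearch("test");
        // ... search and assertions ...
    }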
b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -696,9 +696,9 @@ public void sendSearchResponse(InternalSearchResponse internalSearchResponse, At searchContextId = null; } } + onPhaseEnd(); listener.onResponse(buildSearchResponse(internalSearchResponse, failures, scrollId, searchContextId)); } - onPhaseEnd(); setCurrentPhase(null); } From c0c76e6db6b6a39401700de54f7aa64e2cc6f9f1 Mon Sep 17 00:00:00 2001 From: Martin Gaievski Date: Wed, 15 Nov 2023 12:01:47 -0800 Subject: [PATCH 154/178] Passing IndicesService to ingest processor factory with processor params (#10307) * Passing IngestService to processor factory with processor params Signed-off-by: Martin Gaievski * Fixed typo in the changlelog entry for this PR Signed-off-by: Martin Gaievski --------- Signed-off-by: Martin Gaievski --- CHANGELOG.md | 1 + .../org/opensearch/ingest/IngestService.java | 7 +++++-- .../java/org/opensearch/ingest/Processor.java | 7 ++++++- .../main/java/org/opensearch/node/Node.java | 20 ++++++++++--------- .../opensearch/ingest/IngestServiceTests.java | 15 +++++++++----- .../snapshots/SnapshotResiliencyTests.java | 3 ++- 6 files changed, 35 insertions(+), 18 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 99c31f00278b8..202e0fa910c55 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -103,6 +103,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) - Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069)) - [Streaming Indexing] Introduce new experimental server HTTP transport based on Netty 4 and Project Reactor (Reactor Netty) ([#9672](https://github.com/opensearch-project/OpenSearch/pull/9672)) +- Allowing pipeline processors to access index mapping info by passing ingest service ref as part of the processor factory parameters ([#10307](https://github.com/opensearch-project/OpenSearch/pull/10307)) ### Dependencies - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java index baf357a4bc0d5..2d4439e86461b 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestService.java +++ b/server/src/main/java/org/opensearch/ingest/IngestService.java @@ -72,6 +72,7 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.VersionType; import org.opensearch.index.analysis.AnalysisRegistry; +import org.opensearch.indices.IndicesService; import org.opensearch.plugins.IngestPlugin; import org.opensearch.script.ScriptService; import org.opensearch.threadpool.ThreadPool; @@ -128,7 +129,8 @@ public IngestService( ScriptService scriptService, AnalysisRegistry analysisRegistry, List ingestPlugins, - Client client + Client client, + IndicesService indicesService ) { this.clusterService = clusterService; this.scriptService = scriptService; @@ -143,7 +145,8 @@ public IngestService( (delay, command) -> threadPool.schedule(command, TimeValue.timeValueMillis(delay), ThreadPool.Names.GENERIC), this, client, - threadPool.generic()::execute + threadPool.generic()::execute, + indicesService ) ); this.threadPool = threadPool; diff --git 
a/server/src/main/java/org/opensearch/ingest/Processor.java b/server/src/main/java/org/opensearch/ingest/Processor.java index 6097045a87e21..ecae1c139ea5e 100644 --- a/server/src/main/java/org/opensearch/ingest/Processor.java +++ b/server/src/main/java/org/opensearch/ingest/Processor.java @@ -36,6 +36,7 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.env.Environment; import org.opensearch.index.analysis.AnalysisRegistry; +import org.opensearch.indices.IndicesService; import org.opensearch.script.ScriptService; import org.opensearch.threadpool.Scheduler; @@ -156,6 +157,8 @@ class Parameters { */ public final Client client; + public final IndicesService indicesService; + public Parameters( Environment env, ScriptService scriptService, @@ -165,7 +168,8 @@ public Parameters( BiFunction scheduler, IngestService ingestService, Client client, - Consumer genericExecutor + Consumer genericExecutor, + IndicesService indicesService ) { this.env = env; this.scriptService = scriptService; @@ -176,6 +180,7 @@ public Parameters( this.ingestService = ingestService; this.client = client; this.genericExecutor = genericExecutor; + this.indicesService = indicesService; } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index e80b768074fc7..8fbb1a9306c6c 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -629,15 +629,6 @@ protected Node( metricsRegistry = metricsRegistryFactory.getMetricsRegistry(); resourcesToClose.add(tracer::close); resourcesToClose.add(metricsRegistry::close); - final IngestService ingestService = new IngestService( - clusterService, - threadPool, - this.environment, - scriptService, - analysisModule.getAnalysisRegistry(), - pluginsService.filterPlugins(IngestPlugin.class), - client - ); final ClusterInfoService clusterInfoService = newClusterInfoService(settings, clusterService, threadPool, client); final UsageService usageService = new UsageService(); @@ -823,6 +814,17 @@ protected Node( recoverySettings ); + final IngestService ingestService = new IngestService( + clusterService, + threadPool, + this.environment, + scriptService, + analysisModule.getAnalysisRegistry(), + pluginsService.filterPlugins(IngestPlugin.class), + client, + indicesService + ); + final AliasValidator aliasValidator = new AliasValidator(); final ShardLimitValidator shardLimitValidator = new ShardLimitValidator(settings, clusterService, systemIndices); diff --git a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java index 1dfd41e96817e..2edfe87387c92 100644 --- a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java @@ -67,6 +67,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.VersionType; +import org.opensearch.indices.IndicesService; import org.opensearch.plugins.IngestPlugin; import org.opensearch.script.MockScriptEngine; import org.opensearch.script.Script; @@ -149,7 +150,8 @@ public void testIngestPlugin() { null, null, Collections.singletonList(DUMMY_PLUGIN), - client + client, + mock(IndicesService.class) ); Map factories = ingestService.getProcessorFactories(); assertTrue(factories.containsKey("foo")); @@ -167,7 +169,8 @@ public void testIngestPluginDuplicate() { null, null, 
Arrays.asList(DUMMY_PLUGIN, DUMMY_PLUGIN), - client + client, + mock(IndicesService.class) ) ); assertTrue(e.getMessage(), e.getMessage().contains("already registered")); @@ -182,7 +185,8 @@ public void testExecuteIndexPipelineDoesNotExist() { null, null, Collections.singletonList(DUMMY_PLUGIN), - client + client, + mock(IndicesService.class) ); final IndexRequest indexRequest = new IndexRequest("_index").id("_id") .source(emptyMap()) @@ -1485,7 +1489,8 @@ public Map getProcessors(Processor.Parameters paramet null, null, Arrays.asList(testPlugin), - client + client, + mock(IndicesService.class) ); ingestService.addIngestClusterStateListener(ingestClusterStateListener); @@ -1702,7 +1707,7 @@ private static IngestService createWithProcessors(Map public Map getProcessors(final Processor.Parameters parameters) { return processors; } - }), client); + }), client, mock(IndicesService.class)); } private CompoundProcessor mockCompoundProcessor() { diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index b7a2baacba611..352eeb779599c 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -2209,7 +2209,8 @@ public void onFailure(final Exception e) { scriptService, new AnalysisModule(environment, Collections.emptyList()).getAnalysisRegistry(), Collections.emptyList(), - client + client, + indicesService ), transportShardBulkAction, client, From 08db961e1914d93ce9d81e6da84e6e232b8d37e2 Mon Sep 17 00:00:00 2001 From: Ketan Verma <9292653+ketanv3@users.noreply.github.com> Date: Thu, 16 Nov 2023 03:28:42 +0530 Subject: [PATCH 155/178] Refactor common parts from the Rounding class into a separate 'round' package (#11023) * Refactor common parts from the Rounding class into a separate 'round' package Signed-off-by: Ketan Verma * Move RoundableTests from :server to :libs:opensearch-common module Signed-off-by: Ketan Verma * Address PR comments Signed-off-by: Ketan Verma * Replace assert with IllegalArgumentException for size checks Signed-off-by: Ketan Verma --------- Signed-off-by: Ketan Verma --- CHANGELOG.md | 1 + .../RoundableBenchmark.java} | 16 +- .../round/BidirectionalLinearSearcher.java | 59 +++++++ .../common/round/BinarySearcher.java | 43 +++++ .../opensearch/common/round/Roundable.java | 28 ++++ .../common/round/RoundableFactory.java | 39 +++++ .../opensearch/common/round/package-info.java | 12 ++ .../common/round/RoundableTests.java | 57 +++++++ .../java/org/opensearch/common/Rounding.java | 148 ++++-------------- .../org/opensearch/common/RoundingTests.java | 22 --- 10 files changed, 279 insertions(+), 146 deletions(-) rename benchmarks/src/main/java/org/opensearch/common/{ArrayRoundingBenchmark.java => round/RoundableBenchmark.java} (89%) create mode 100644 libs/common/src/main/java/org/opensearch/common/round/BidirectionalLinearSearcher.java create mode 100644 libs/common/src/main/java/org/opensearch/common/round/BinarySearcher.java create mode 100644 libs/common/src/main/java/org/opensearch/common/round/Roundable.java create mode 100644 libs/common/src/main/java/org/opensearch/common/round/RoundableFactory.java create mode 100644 libs/common/src/main/java/org/opensearch/common/round/package-info.java create mode 100644 libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 202e0fa910c55..8604f0e2914ee 
100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -130,6 +130,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add telemetry tracer/metric enable flag and integ test. ([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395)) - Add instrumentation for indexing in transport bulk action and transport shard bulk action. ([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273)) - [BUG] Disable sort optimization for HALF_FLOAT ([#10999](https://github.com/opensearch-project/OpenSearch/pull/10999)) +- Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023)) - Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) - Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087)) - Made leader/follower check timeout setting dynamic ([#10528](https://github.com/opensearch-project/OpenSearch/pull/10528)) diff --git a/benchmarks/src/main/java/org/opensearch/common/ArrayRoundingBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java similarity index 89% rename from benchmarks/src/main/java/org/opensearch/common/ArrayRoundingBenchmark.java rename to benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java index 64c0a9e1d7aa6..4e07af452968b 100644 --- a/benchmarks/src/main/java/org/opensearch/common/ArrayRoundingBenchmark.java +++ b/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.common; +package org.opensearch.common.round; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; @@ -27,13 +27,13 @@ @Warmup(iterations = 3, time = 1) @Measurement(iterations = 1, time = 1) @BenchmarkMode(Mode.Throughput) -public class ArrayRoundingBenchmark { +public class RoundableBenchmark { @Benchmark - public void round(Blackhole bh, Options opts) { - Rounding.Prepared rounding = opts.supplier.get(); + public void floor(Blackhole bh, Options opts) { + Roundable roundable = opts.supplier.get(); for (long key : opts.queries) { - bh.consume(rounding.round(key)); + bh.consume(roundable.floor(key)); } } @@ -90,7 +90,7 @@ public static class Options { public String distribution; public long[] queries; - public Supplier supplier; + public Supplier supplier; @Setup public void setup() { @@ -130,10 +130,10 @@ public void setup() { switch (type) { case "binary": - supplier = () -> new Rounding.BinarySearchArrayRounding(values, size, null); + supplier = () -> new BinarySearcher(values, size); break; case "linear": - supplier = () -> new Rounding.BidirectionalLinearSearchArrayRounding(values, size, null); + supplier = () -> new BidirectionalLinearSearcher(values, size); break; default: throw new IllegalArgumentException("invalid type: " + type); diff --git a/libs/common/src/main/java/org/opensearch/common/round/BidirectionalLinearSearcher.java b/libs/common/src/main/java/org/opensearch/common/round/BidirectionalLinearSearcher.java new file mode 100644 index 0000000000000..5c3dcf2bd4708 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/BidirectionalLinearSearcher.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * 
this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +/** + * It uses linear search on a sorted array of pre-computed round-down points. + * For small inputs (≤ 64 elements), this can be much faster than binary search as it avoids the penalty of + * branch mispredictions and pipeline stalls, and accesses memory sequentially. + * + *
<p>
    + * It uses "meet in the middle" linear search to avoid the worst case scenario when the desired element is present + * at either side of the array. This is helpful for time-series data where velocity increases over time, so more + * documents are likely to find a greater timestamp which is likely to be present on the right end of the array. + * + * @opensearch.internal + */ +@InternalApi +class BidirectionalLinearSearcher implements Roundable { + private final long[] ascending; + private final long[] descending; + + BidirectionalLinearSearcher(long[] values, int size) { + if (size <= 0) { + throw new IllegalArgumentException("at least one value must be present"); + } + + int len = (size + 1) >>> 1; // rounded-up to handle odd number of values + ascending = new long[len]; + descending = new long[len]; + + for (int i = 0; i < len; i++) { + ascending[i] = values[i]; + descending[i] = values[size - i - 1]; + } + } + + @Override + public long floor(long key) { + int i = 0; + for (; i < ascending.length; i++) { + if (descending[i] <= key) { + return descending[i]; + } + if (ascending[i] > key) { + assert i > 0 : "key must be greater than or equal to " + ascending[0]; + return ascending[i - 1]; + } + } + return ascending[i - 1]; + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/round/BinarySearcher.java b/libs/common/src/main/java/org/opensearch/common/round/BinarySearcher.java new file mode 100644 index 0000000000000..b9d76945115ed --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/BinarySearcher.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +import java.util.Arrays; + +/** + * It uses binary search on a sorted array of pre-computed round-down points. + * + * @opensearch.internal + */ +@InternalApi +class BinarySearcher implements Roundable { + private final long[] values; + private final int size; + + BinarySearcher(long[] values, int size) { + if (size <= 0) { + throw new IllegalArgumentException("at least one value must be present"); + } + + this.values = values; + this.size = size; + } + + @Override + public long floor(long key) { + int idx = Arrays.binarySearch(values, 0, size, key); + assert idx != -1 : "key must be greater than or equal to " + values[0]; + if (idx < 0) { + idx = -2 - idx; + } + return values[idx]; + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/round/Roundable.java b/libs/common/src/main/java/org/opensearch/common/round/Roundable.java new file mode 100644 index 0000000000000..ae6f9b787c1e9 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/Roundable.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +/** + * Interface to round-off values. + * + * @opensearch.internal + */ +@InternalApi +@FunctionalInterface +public interface Roundable { + /** + * Returns the greatest lower bound of the given key. + * In other words, it returns the largest value such that {@code value <= key}. 
+ * @param key to floor + * @return the floored value + */ + long floor(long key); +} diff --git a/libs/common/src/main/java/org/opensearch/common/round/RoundableFactory.java b/libs/common/src/main/java/org/opensearch/common/round/RoundableFactory.java new file mode 100644 index 0000000000000..b7422694c3013 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/RoundableFactory.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +/** + * Factory class to create and return the fastest implementation of {@link Roundable}. + * + * @opensearch.internal + */ +@InternalApi +public final class RoundableFactory { + /** + * The maximum limit up to which linear search is used, otherwise binary search is used. + * This is because linear search is much faster on small arrays. + * Benchmark results: PR #9727 + */ + private static final int LINEAR_SEARCH_MAX_SIZE = 64; + + private RoundableFactory() {} + + /** + * Creates and returns the fastest implementation of {@link Roundable}. + */ + public static Roundable create(long[] values, int size) { + if (size <= LINEAR_SEARCH_MAX_SIZE) { + return new BidirectionalLinearSearcher(values, size); + } else { + return new BinarySearcher(values, size); + } + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/round/package-info.java b/libs/common/src/main/java/org/opensearch/common/round/package-info.java new file mode 100644 index 0000000000000..e79c4017de31b --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains classes to round-off values. + */ +package org.opensearch.common.round; diff --git a/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java b/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java new file mode 100644 index 0000000000000..ae9f629c59024 --- /dev/null +++ b/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.test.OpenSearchTestCase; + +public class RoundableTests extends OpenSearchTestCase { + + public void testFloor() { + int size = randomIntBetween(1, 256); + long[] values = new long[size]; + for (int i = 1; i < values.length; i++) { + values[i] = values[i - 1] + (randomNonNegativeLong() % 200) + 1; + } + + Roundable[] impls = { new BinarySearcher(values, size), new BidirectionalLinearSearcher(values, size) }; + + for (int i = 0; i < 100000; i++) { + // Index of the expected round-down point. + int idx = randomIntBetween(0, size - 1); + + // Value of the expected round-down point. + long expected = values[idx]; + + // Delta between the expected and the next round-down point. + long delta = (idx < size - 1) ? 
(values[idx + 1] - values[idx]) : 200; + + // Adding a random delta between 0 (inclusive) and delta (exclusive) to the expected + // round-down point, which will still floor to the same value. + long key = expected + (randomNonNegativeLong() % delta); + + for (Roundable roundable : impls) { + assertEquals(expected, roundable.floor(key)); + } + } + } + + public void testFailureCases() { + Throwable throwable; + + throwable = assertThrows(IllegalArgumentException.class, () -> new BinarySearcher(new long[0], 0)); + assertEquals("at least one value must be present", throwable.getMessage()); + throwable = assertThrows(IllegalArgumentException.class, () -> new BidirectionalLinearSearcher(new long[0], 0)); + assertEquals("at least one value must be present", throwable.getMessage()); + + throwable = assertThrows(AssertionError.class, () -> new BinarySearcher(new long[] { 100 }, 1).floor(50)); + assertEquals("key must be greater than or equal to 100", throwable.getMessage()); + throwable = assertThrows(AssertionError.class, () -> new BidirectionalLinearSearcher(new long[] { 100 }, 1).floor(50)); + assertEquals("key must be greater than or equal to 100", throwable.getMessage()); + } +} diff --git a/server/src/main/java/org/opensearch/common/Rounding.java b/server/src/main/java/org/opensearch/common/Rounding.java index 667eb4529fe38..061934f9722f5 100644 --- a/server/src/main/java/org/opensearch/common/Rounding.java +++ b/server/src/main/java/org/opensearch/common/Rounding.java @@ -37,7 +37,8 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.LocalTimeOffset.Gap; import org.opensearch.common.LocalTimeOffset.Overlap; -import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.round.Roundable; +import org.opensearch.common.round.RoundableFactory; import org.opensearch.common.time.DateUtils; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; @@ -59,7 +60,6 @@ import java.time.temporal.TemporalQueries; import java.time.zone.ZoneOffsetTransition; import java.time.zone.ZoneRules; -import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.Objects; @@ -421,13 +421,6 @@ private abstract class PreparedRounding implements Prepared { */ private static final int DEFAULT_ARRAY_ROUNDING_MAX_THRESHOLD = 128; - /** - * The maximum limit up to which linear search is used, otherwise binary search is used. - * This is because linear search is much faster on small arrays. - * Benchmark results: PR #9727 - */ - private static final int LINEAR_SEARCH_ARRAY_ROUNDING_MAX_THRESHOLD = 64; - /** * Attempt to build a {@link Prepared} implementation that relies on pre-calcuated * "round down" points. If there would be more than {@code max} points then return @@ -451,9 +444,36 @@ protected Prepared maybeUseArray(long minUtcMillis, long maxUtcMillis, int max) values = ArrayUtil.grow(values, i + 1); values[i++] = rounded; } - return i <= LINEAR_SEARCH_ARRAY_ROUNDING_MAX_THRESHOLD - ? new BidirectionalLinearSearchArrayRounding(values, i, this) - : new BinarySearchArrayRounding(values, i, this); + return new ArrayRounding(RoundableFactory.create(values, i), this); + } + } + + /** + * ArrayRounding is an implementation of {@link Prepared} which uses + * pre-calculated round-down points to speed up lookups. 
+ */ + private static class ArrayRounding implements Prepared { + private final Roundable roundable; + private final Prepared delegate; + + public ArrayRounding(Roundable roundable, Prepared delegate) { + this.roundable = roundable; + this.delegate = delegate; + } + + @Override + public long round(long utcMillis) { + return roundable.floor(utcMillis); + } + + @Override + public long nextRoundingValue(long utcMillis) { + return delegate.nextRoundingValue(utcMillis); + } + + @Override + public double roundingSize(long utcMillis, DateTimeUnit timeUnit) { + return delegate.roundingSize(utcMillis, timeUnit); } } @@ -1343,108 +1363,4 @@ public static Rounding read(StreamInput in) throws IOException { throw new OpenSearchException("unknown rounding id [" + id + "]"); } } - - /** - * Implementation of {@link Prepared} using pre-calculated "round down" points. - * - *
<p>
    - * It uses binary search to find the greatest round-down point less than or equal to the given timestamp. - * - * @opensearch.internal - */ - @InternalApi - static class BinarySearchArrayRounding implements Prepared { - private final long[] values; - private final int max; - private final Prepared delegate; - - BinarySearchArrayRounding(long[] values, int max, Prepared delegate) { - assert max > 0 : "at least one round-down point must be present"; - this.values = values; - this.max = max; - this.delegate = delegate; - } - - @Override - public long round(long utcMillis) { - assert values[0] <= utcMillis : "utcMillis must be after " + values[0]; - int idx = Arrays.binarySearch(values, 0, max, utcMillis); - assert idx != -1 : "The insertion point is before the array! This should have tripped the assertion above."; - assert -1 - idx <= values.length : "This insertion point is after the end of the array."; - if (idx < 0) { - idx = -2 - idx; - } - return values[idx]; - } - - @Override - public long nextRoundingValue(long utcMillis) { - return delegate.nextRoundingValue(utcMillis); - } - - @Override - public double roundingSize(long utcMillis, DateTimeUnit timeUnit) { - return delegate.roundingSize(utcMillis, timeUnit); - } - } - - /** - * Implementation of {@link Prepared} using pre-calculated "round down" points. - * - *
<p>
    - * It uses linear search to find the greatest round-down point less than or equal to the given timestamp. - * For small inputs (≤ 64 elements), this can be much faster than binary search as it avoids the penalty of - * branch mispredictions and pipeline stalls, and accesses memory sequentially. - * - *
<p>
    - * It uses "meet in the middle" linear search to avoid the worst case scenario when the desired element is present - * at either side of the array. This is helpful for time-series data where velocity increases over time, so more - * documents are likely to find a greater timestamp which is likely to be present on the right end of the array. - * - * @opensearch.internal - */ - @InternalApi - static class BidirectionalLinearSearchArrayRounding implements Prepared { - private final long[] ascending; - private final long[] descending; - private final Prepared delegate; - - BidirectionalLinearSearchArrayRounding(long[] values, int max, Prepared delegate) { - assert max > 0 : "at least one round-down point must be present"; - this.delegate = delegate; - int len = (max + 1) >>> 1; // rounded-up to handle odd number of values - ascending = new long[len]; - descending = new long[len]; - - for (int i = 0; i < len; i++) { - ascending[i] = values[i]; - descending[i] = values[max - i - 1]; - } - } - - @Override - public long round(long utcMillis) { - int i = 0; - for (; i < ascending.length; i++) { - if (descending[i] <= utcMillis) { - return descending[i]; - } - if (ascending[i] > utcMillis) { - assert i > 0 : "utcMillis must be after " + ascending[0]; - return ascending[i - 1]; - } - } - return ascending[i - 1]; - } - - @Override - public long nextRoundingValue(long utcMillis) { - return delegate.nextRoundingValue(utcMillis); - } - - @Override - public double roundingSize(long utcMillis, DateTimeUnit timeUnit) { - return delegate.roundingSize(utcMillis, timeUnit); - } - } } diff --git a/server/src/test/java/org/opensearch/common/RoundingTests.java b/server/src/test/java/org/opensearch/common/RoundingTests.java index cc71ee08abcca..9a6e6a6ac54d0 100644 --- a/server/src/test/java/org/opensearch/common/RoundingTests.java +++ b/server/src/test/java/org/opensearch/common/RoundingTests.java @@ -1142,28 +1142,6 @@ public void testNonMillisecondsBasedUnitCalendarRoundingSize() { assertThat(prepared.roundingSize(thirdQuarter, Rounding.DateTimeUnit.HOUR_OF_DAY), closeTo(2208.0, 0.000001)); } - public void testArrayRoundingImplementations() { - int length = randomIntBetween(1, 256); - long[] values = new long[length]; - for (int i = 1; i < values.length; i++) { - values[i] = values[i - 1] + (randomNonNegativeLong() % 100); - } - - Rounding.Prepared binarySearchImpl = new Rounding.BinarySearchArrayRounding(values, length, null); - Rounding.Prepared linearSearchImpl = new Rounding.BidirectionalLinearSearchArrayRounding(values, length, null); - - for (int i = 0; i < 100000; i++) { - long key = values[0] + (randomNonNegativeLong() % (100 + values[length - 1] - values[0])); - assertEquals(binarySearchImpl.round(key), linearSearchImpl.round(key)); - } - - AssertionError exception = expectThrows(AssertionError.class, () -> { binarySearchImpl.round(values[0] - 1); }); - assertEquals("utcMillis must be after " + values[0], exception.getMessage()); - - exception = expectThrows(AssertionError.class, () -> { linearSearchImpl.round(values[0] - 1); }); - assertEquals("utcMillis must be after " + values[0], exception.getMessage()); - } - private void assertInterval(long rounded, long nextRoundingValue, Rounding rounding, int minutes, ZoneId tz) { assertInterval(rounded, dateBetween(rounded, nextRoundingValue), nextRoundingValue, rounding, tz); long millisPerMinute = 60_000; From 53d532996c5088e65f8652fd90548962270cca56 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Wed, 15 Nov 2023 14:04:14 -0800 Subject: [PATCH 
156/178] Muting flaky tests in SegmentReplicationSuiteIT (#11159) Signed-off-by: Marc Handalian --- .../indices/replication/SegmentReplicationSuiteIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java index 800704eae7fa7..8c045c1560dd3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java @@ -8,6 +8,7 @@ package org.opensearch.indices.replication; +import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; @@ -15,6 +16,7 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Before; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9499") @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, minNumDataNodes = 2) public class SegmentReplicationSuiteIT extends SegmentReplicationBaseIT { From 54ff35307686dbcd745e593533c61291f48a425c Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Wed, 15 Nov 2023 14:05:51 -0800 Subject: [PATCH 157/178] Fix SegmentReplicationPressureService to not schedule async tasks when remote failures are disabled (#10569) * Fix SegmentReplicationPressureService to not schedule async tasks when remote failures are disabled Today segment replication's pressure service provides a setting 'segrep.replication.time.limit' that if set to a positive value will spawn an Async task every 30s validating if there are replicas over that limit. If over the limit the task considers if shards should be failed remotely. The async task is being rescheduled every 30s, even if the setting is set to 0. This change ensures that task isn't scheduled at all and reconsiders only if the setting is updated. Signed-off-by: Marc Handalian * PR feedback. Signed-off-by: Marc Handalian * Revert "PR feedback." This reverts commit 737116104a61ec2245bda14327f50b8bf701c2a3. Signed-off-by: Marc Handalian * Add better unit test to ensure task is scheduled/unscheduled. 
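Illustration only (not part of this diff): with the fix, toggling the limit at
runtime both stops and restarts the background task. A minimal sketch, assuming
the setting name quoted above and the test-style client call used elsewhere in
this series:

    // Setting the limit to zero now cancels rescheduling of AsyncFailStaleReplicaTask;
    // updating it back to a positive value schedules the task again.
    client().admin().cluster().prepareUpdateSettings()
        .setPersistentSettings(Settings.builder().put("segrep.replication.time.limit", "0ms"))
        .get();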
Signed-off-by: Marc Handalian --------- Signed-off-by: Marc Handalian --- .../SegmentReplicationPressureService.java | 19 ++++++++-- ...egmentReplicationPressureServiceTests.java | 37 +++++++++++++++++++ 2 files changed, 53 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java index d9d480e7b2b27..ce38dd3bb236c 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java @@ -98,7 +98,7 @@ public class SegmentReplicationPressureService implements Closeable { private final SegmentReplicationStatsTracker tracker; private final ShardStateAction shardStateAction; - private final AsyncFailStaleReplicaTask failStaleReplicaTask; + private volatile AsyncFailStaleReplicaTask failStaleReplicaTask; @Inject public SegmentReplicationPressureService( @@ -202,6 +202,15 @@ public void setMaxAllowedStaleReplicas(double maxAllowedStaleReplicas) { public void setReplicationTimeLimitFailReplica(TimeValue replicationTimeLimitFailReplica) { this.replicationTimeLimitFailReplica = replicationTimeLimitFailReplica; + updateAsyncFailReplicaTask(); + } + + private synchronized void updateAsyncFailReplicaTask() { + try { + failStaleReplicaTask.close(); + } finally { + failStaleReplicaTask = new AsyncFailStaleReplicaTask(this); + } } public void setReplicationTimeLimitBackpressure(TimeValue replicationTimeLimitBackpressure) { @@ -228,13 +237,13 @@ final static class AsyncFailStaleReplicaTask extends AbstractAsyncTask { @Override protected boolean mustReschedule() { - return true; + return pressureService.shouldScheduleAsyncFailTask(); } @Override protected void runInternal() { // Do not fail the replicas if time limit is set to 0 (i.e. disabled). - if (TimeValue.ZERO.equals(pressureService.replicationTimeLimitFailReplica) == false) { + if (pressureService.shouldScheduleAsyncFailTask()) { final SegmentReplicationStats stats = pressureService.tracker.getStats(); // Find the shardId in node which is having stale replicas with highest current replication time. @@ -302,4 +311,8 @@ public String toString() { } + boolean shouldScheduleAsyncFailTask() { + return TimeValue.ZERO.equals(replicationTimeLimitFailReplica) == false; + } + } diff --git a/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java index 478fdcb24f76a..a9725f638cc53 100644 --- a/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java @@ -217,6 +217,8 @@ public void testFailStaleReplicaTask() throws Exception { assertEquals(5, shardStats.getCheckpointsBehindCount()); // call the background task + assertTrue(service.getFailStaleReplicaTask().mustReschedule()); + assertTrue(service.getFailStaleReplicaTask().isScheduled()); service.getFailStaleReplicaTask().runInternal(); // verify that remote shard failed method is called which fails the replica shards falling behind. 
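Illustration only (key and values are examples, mirroring the new unit tests
below): a placeholder that resolves to a JSON-style array is now parsed into a
proper list setting rather than a single string:

    Settings settings = Settings.builder()
        .put("hosts", "${HOSTNAMES}")
        .replacePropertyPlaceholders(name -> name.equals("HOSTNAMES") ? "[\"h1\", \"h2\"]" : null)
        .build();
    // settings.getAsList("hosts") -> [h1, h2]; a resolved value of "[]" yields an empty list.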
@@ -257,6 +259,41 @@ public void testFailStaleReplicaTaskDisabled() throws Exception { } } + public void testFailStaleReplicaTaskToggleOnOff() throws Exception { + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) + .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueMillis(10)) + .put(MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING.getKey(), TimeValue.timeValueMillis(1)) + .build(); + + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primaryShard = shards.getPrimary(); + SegmentReplicationPressureService service = buildPressureService(settings, primaryShard); + + // index docs in batches without refreshing + indexInBatches(5, shards, primaryShard); + + // assert that replica shard is few checkpoints behind primary + Set replicationStats = primaryShard.getReplicationStatsForTrackedReplicas(); + assertEquals(1, replicationStats.size()); + SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); + assertEquals(5, shardStats.getCheckpointsBehindCount()); + + assertTrue(service.getFailStaleReplicaTask().mustReschedule()); + assertTrue(service.getFailStaleReplicaTask().isScheduled()); + replicateSegments(primaryShard, shards.getReplicas()); + + service.setReplicationTimeLimitFailReplica(TimeValue.ZERO); + assertFalse(service.getFailStaleReplicaTask().mustReschedule()); + assertFalse(service.getFailStaleReplicaTask().isScheduled()); + service.setReplicationTimeLimitFailReplica(TimeValue.timeValueMillis(1)); + assertTrue(service.getFailStaleReplicaTask().mustReschedule()); + assertTrue(service.getFailStaleReplicaTask().isScheduled()); + } + } + private int indexInBatches(int count, ReplicationGroup shards, IndexShard primaryShard) throws Exception { int totalDocs = 0; for (int i = 0; i < count; i++) { From 5b505ec04c2684867901d1cabfed8431d7d34fe3 Mon Sep 17 00:00:00 2001 From: Jeongmin Yu Date: Thu, 16 Nov 2023 07:11:14 +0900 Subject: [PATCH 158/178] Treat Setting value with empty array string as empty array (#10625) * Treat Setting value with empty array string as empty array Signed-off-by: Jeongmin Yu * Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) 
Signed-off-by: Andriy Redko --------- Signed-off-by: Jeongmin Yu Signed-off-by: Andriy Redko Co-authored-by: Andriy Redko --- CHANGELOG.md | 1 + server/build.gradle | 1 + .../opensearch/common/settings/Settings.java | 39 ++++++++++++++++++- .../common/settings/SettingsTests.java | 24 ++++++++++++ .../node/NodeRoleSettingsTests.java | 10 +++++ .../common/settings/loader/test-settings.yml | 1 + 6 files changed, 74 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8604f0e2914ee..a75463ff16a08 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) - [AdmissionControl] Added changes for AdmissionControl Interceptor and AdmissionControlService for RateLimiting ([#9286](https://github.com/opensearch-project/OpenSearch/pull/9286)) - GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) +- Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) ([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) - [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853)) - Add back half_float BKD based sort query optimization ([#11024](https://github.com/opensearch-project/OpenSearch/pull/11024)) diff --git a/server/build.gradle b/server/build.gradle index fa8a44ef6fc94..17b8ff0469ad8 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -403,6 +403,7 @@ tasks.named("licenseHeaders").configure { } tasks.test { + environment "node.roles.test", "[]" if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_1_8) { jvmArgs += ["--add-opens", "java.base/java.nio.file=ALL-UNNAMED"] } diff --git a/server/src/main/java/org/opensearch/common/settings/Settings.java b/server/src/main/java/org/opensearch/common/settings/Settings.java index 0557884f0f8ad..9da47ff3aa700 100644 --- a/server/src/main/java/org/opensearch/common/settings/Settings.java +++ b/server/src/main/java/org/opensearch/common/settings/Settings.java @@ -80,6 +80,7 @@ import java.util.Map; import java.util.NoSuchElementException; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.TreeMap; import java.util.concurrent.TimeUnit; @@ -1211,8 +1212,14 @@ public boolean shouldRemoveMissingPlaceholder(String placeholderName) { String value = propertyPlaceholder.replacePlaceholders(Settings.toString(entry.getValue()), placeholderResolver); // if the values exists and has length, we should maintain it in the map // otherwise, the replace process resolved into removing it - if (Strings.hasLength(value)) { - entry.setValue(value); + if (Strings.hasLength(value) == true) { + // try to parse the value as a list first + final Optional> optList = tryParseableStringToList(value); + if (optList.isPresent()) { + entry.setValue(optList.get()); + } else { + entry.setValue(value); + } } else { entryItr.remove(); } @@ -1248,6 +1255,34 @@ public Settings build() { processLegacyLists(map); return new Settings(map, secureSettings.get()); } + + /** + * Tries to parse the placeholder value as a list (fe [], ["a", "b", "c"]) + * @param parsableString placeholder value to parse + * @return the {@link Optional} result 
of the parsing attempt + */ + private static Optional> tryParseableStringToList(String parsableString) { + // fromXContent doesn't use named xcontent or deprecation. + try ( + XContentParser xContentParser = MediaTypeRegistry.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, parsableString) + ) { + XContentParser.Token token = xContentParser.nextToken(); + if (token != XContentParser.Token.START_ARRAY) { + return Optional.empty(); + } + ArrayList list = new ArrayList<>(); + while ((token = xContentParser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token != XContentParser.Token.VALUE_STRING) { + return Optional.empty(); + } + list.add(xContentParser.text()); + } + return Optional.of(list); + } catch (IOException e) { + return Optional.empty(); + } + } } // TODO We could use an FST internally to make things even faster and more compact diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java index af4efabb341ee..669d40f40bb2c 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java @@ -50,6 +50,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -64,7 +65,9 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -93,6 +96,15 @@ public void testReplacePropertiesPlaceholderSystemPropertyList() { assertThat(settings.getAsList("setting1"), contains(hostname, hostip)); } + public void testReplacePropertiesPlaceholderSystemPropertyEmptyList() { + final Settings settings = Settings.builder() + .put("setting1", "${HOSTNAMES}") + .replacePropertyPlaceholders(name -> name.equals("HOSTNAMES") ? "[]" : null) + .build(); + assertThat(settings.getAsList("setting1"), empty()); + assertThat(settings.get("setting1"), equalTo("[]")); + } + public void testReplacePropertiesPlaceholderSystemVariablesHaveNoEffect() { final String value = System.getProperty("java.home"); assertNotNull(value); @@ -603,6 +615,18 @@ public void testSimpleYamlSettings() throws Exception { assertThat(settings.getAsList("test1.test3").size(), equalTo(2)); assertThat(settings.getAsList("test1.test3").get(0), equalTo("test3-1")); assertThat(settings.getAsList("test1.test3").get(1), equalTo("test3-2")); + assertThat(settings.getAsList("test1.test4"), empty()); + } + + public void testYamlPlaceholder() throws IOException { + try (InputStream in = new ByteArrayInputStream("hosts: ${HOSTNAMES}".getBytes(StandardCharsets.UTF_8))) { + Settings settings = Settings.builder() + .loadFromStream("foo.yml", in, false) + .replacePropertyPlaceholders(name -> name.equals("HOSTNAMES") ? 
"[\"h1\", \"h2\"]" : null) + .build(); + assertThat(settings.getAsList("hosts"), hasSize(2)); + assertThat(settings.getAsList("hosts"), containsInAnyOrder("h1", "h2")); + } } public void testYamlLegacyList() throws IOException { diff --git a/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java b/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java index 0a3af34bc12f4..b2bb6897fe164 100644 --- a/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java +++ b/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java @@ -18,6 +18,7 @@ import java.util.List; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; public class NodeRoleSettingsTests extends OpenSearchTestCase { @@ -72,4 +73,13 @@ public void testUnknownNodeRoleOnly() { assertEquals(testRole, nodeRoles.get(0).roleName()); assertEquals(testRole, nodeRoles.get(0).roleNameAbbreviation()); } + + public void testNodeRolesFromEnvironmentVariables() { + Settings roleSettings = Settings.builder() + .put(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), "${node.roles.test}") + .replacePropertyPlaceholders() + .build(); + List nodeRoles = NodeRoleSettings.NODE_ROLES_SETTING.get(roleSettings); + assertThat(nodeRoles, empty()); + } } diff --git a/server/src/test/resources/org/opensearch/common/settings/loader/test-settings.yml b/server/src/test/resources/org/opensearch/common/settings/loader/test-settings.yml index b533ae036e758..1bffbc18e83e6 100644 --- a/server/src/test/resources/org/opensearch/common/settings/loader/test-settings.yml +++ b/server/src/test/resources/org/opensearch/common/settings/loader/test-settings.yml @@ -6,3 +6,4 @@ test1: test3: - test3-1 - test3-2 + test4: [] From 09bacee5fc85676e97bee6b4ad87dec35c6aa8cc Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 16 Nov 2023 10:59:19 -0500 Subject: [PATCH 159/178] [BWC and API enforcement] Decorate the existing APIs with proper annotations (part 3) (#11182) Signed-off-by: Andriy Redko --- .../java/org/opensearch/common/Explicit.java | 5 +- .../opensearch/common/lease/Releasable.java | 5 +- .../core/action/ActionResponse.java | 2 + .../core/common/io/stream/Writeable.java | 11 +++- .../core/common/util/ByteArray.java | 4 +- .../breaker/AllCircuitBreakerStats.java | 4 +- .../breaker/CircuitBreakerService.java | 8 +-- .../indices/breaker/CircuitBreakerStats.java | 4 +- .../core/xcontent/ObjectParser.java | 4 +- ...emoteStoreBackpressureAndResiliencyIT.java | 4 +- .../org/opensearch/action/AliasesRequest.java | 5 +- .../opensearch/action/OriginalIndices.java | 4 +- .../ClusterAllocationExplanation.java | 4 +- .../snapshots/status/SnapshotStatus.java | 4 +- .../admin/indices/rollover/RolloverInfo.java | 4 +- .../action/search/ParsedScrollId.java | 5 +- .../action/search/SearchContextIdForNode.java | 4 +- .../opensearch/action/search/SearchPhase.java | 4 +- .../action/search/SearchPhaseContext.java | 4 +- .../SearchRequestOperationsListener.java | 4 +- .../action/search/SearchShardIterator.java | 4 +- .../action/search/SearchShardTask.java | 4 +- .../action/search/ShardSearchFailure.java | 4 +- .../action/support/ActiveShardCount.java | 4 +- .../action/support/RetryableAction.java | 4 +- .../PendingReplicationActions.java | 4 +- .../support/replication/ReplicationMode.java | 5 +- .../replication/ReplicationResponse.java | 10 +++- .../org/opensearch/cluster/ClusterState.java | 10 +++- .../cluster/ClusterStateApplier.java | 4 +- .../cluster/ClusterStateListener.java | 5 +- 
.../cluster/ClusterStateTaskConfig.java | 4 +- .../cluster/ClusterStateTaskExecutor.java | 10 +++- .../cluster/ClusterStateTaskListener.java | 4 +- .../cluster/ClusterStateUpdateTask.java | 4 +- .../LocalNodeClusterManagerListener.java | 5 +- .../cluster/LocalNodeMasterListener.java | 5 +- .../org/opensearch/cluster/NamedDiff.java | 4 +- .../cluster/NodeConnectionsService.java | 4 +- .../opensearch/cluster/RestoreInProgress.java | 13 +++-- .../cluster/SnapshotsInProgress.java | 4 +- .../cluster/TimeoutClusterStateListener.java | 4 +- .../ack/ClusterStateUpdateResponse.java | 5 +- .../cluster/block/ClusterBlockException.java | 4 +- .../cluster/block/ClusterBlocks.java | 7 ++- .../block/IndexCreateBlockException.java | 4 +- .../coordination/ClusterStatePublisher.java | 7 ++- .../coordination/CoordinationMetadata.java | 13 +++-- .../coordination/PersistedStateStats.java | 4 +- .../DecommissionAttributeMetadata.java | 4 +- .../cluster/metadata/ComponentTemplate.java | 4 +- .../metadata/ComposableIndexTemplate.java | 7 ++- .../cluster/metadata/DiffableStringMap.java | 4 +- .../cluster/metadata/IndexAbstraction.java | 10 +++- .../cluster/metadata/IndexGraveyard.java | 10 +++- .../cluster/metadata/IndexMetadata.java | 9 ++- .../opensearch/cluster/metadata/Metadata.java | 13 +++-- .../opensearch/cluster/metadata/Template.java | 4 +- .../metadata/WeightedRoutingMetadata.java | 7 +-- .../cluster/node/DiscoveryNode.java | 22 -------- .../cluster/node/DiscoveryNodeFilters.java | 7 ++- .../cluster/routing/AllocationId.java | 4 +- .../cluster/routing/GroupShardsIterator.java | 4 +- .../cluster/routing/IndexRoutingTable.java | 7 ++- .../routing/IndexShardRoutingTable.java | 7 ++- .../cluster/routing/OperationRouting.java | 4 +- .../cluster/routing/RecoverySource.java | 6 +- .../cluster/routing/RerouteService.java | 4 +- .../routing/RoutingChangesObserver.java | 5 +- .../cluster/routing/RoutingNode.java | 4 +- .../cluster/routing/RoutingNodes.java | 10 +++- .../cluster/routing/RoutingTable.java | 7 ++- .../cluster/routing/ShardIterator.java | 4 +- .../routing/ShardMovementStrategy.java | 4 +- .../cluster/routing/ShardRouting.java | 4 +- .../cluster/routing/ShardsIterator.java | 4 +- .../AllocateUnassignedDecision.java | 4 +- .../allocation/AllocationDecision.java | 4 +- .../routing/allocation/MoveDecision.java | 4 +- .../allocation/NodeAllocationResult.java | 7 ++- .../routing/allocation/RoutingAllocation.java | 7 ++- .../allocation/ShardAllocationDecision.java | 4 +- .../allocation/command/AllocationCommand.java | 4 +- .../command/AllocationCommands.java | 4 +- .../allocation/decider/AllocationDecider.java | 4 +- .../decider/AllocationDeciders.java | 4 +- .../cluster/service/ClusterApplier.java | 4 ++ .../service/ClusterApplierService.java | 4 +- .../service/ClusterManagerService.java | 4 +- .../ClusterManagerThrottlingStats.java | 4 ++ .../cluster/service/ClusterService.java | 4 +- .../cluster/service/ClusterStateStats.java | 4 +- .../cluster/service/MasterService.java | 4 +- .../java/org/opensearch/common/Rounding.java | 13 +++-- .../org/opensearch/common/cache/Cache.java | 7 ++- .../opensearch/common/cache/CacheLoader.java | 5 +- .../common/cache/RemovalNotification.java | 5 +- .../common/cache/RemovalReason.java | 5 +- .../common/compress/CompressedXContent.java | 4 +- .../common/concurrent/GatedCloseable.java | 4 +- .../common/io/stream/DelayableWriteable.java | 7 ++- .../index/OpenSearchDirectoryReader.java | 4 +- .../lucene/search/TopDocsAndMaxScore.java | 4 +- 
.../lucene/uid/VersionsAndSeqNoResolver.java | 4 +- .../opensearch/common/time/DateFormatter.java | 4 +- .../common/time/DateMathParser.java | 4 +- .../org/opensearch/common/util/BigArrays.java | 8 ++- .../common/util/CancellableThreads.java | 15 +++-- .../org/opensearch/common/util/Countable.java | 5 +- .../opensearch/common/util/DoubleArray.java | 4 +- .../opensearch/common/util/FloatArray.java | 4 +- .../org/opensearch/common/util/IntArray.java | 4 +- .../org/opensearch/common/util/LongArray.java | 4 +- .../opensearch/common/util/ObjectArray.java | 4 +- .../util/concurrent/AsyncIOProcessor.java | 4 +- .../common/util/concurrent/AtomicArray.java | 4 +- .../common/util/concurrent/ThreadContext.java | 7 ++- .../ThreadContextStatePropagator.java | 5 ++ .../org/opensearch/env/NodeEnvironment.java | 3 + .../gateway/WriteStateException.java | 5 +- .../org/opensearch/index/IndexService.java | 12 ++-- .../org/opensearch/index/IndexWarmer.java | 7 ++- .../index/IndexingPressureService.java | 4 +- .../index/analysis/AnalysisRegistry.java | 4 +- .../index/analysis/AnalyzerProvider.java | 4 +- .../index/analysis/CharFilterFactory.java | 5 +- .../index/analysis/IndexAnalyzers.java | 4 +- .../index/analysis/TokenFilterFactory.java | 4 +- .../index/analysis/TokenizerFactory.java | 4 +- .../opensearch/index/cache/IndexCache.java | 4 +- .../index/cache/bitset/BitsetFilterCache.java | 7 ++- .../cache/bitset/ShardBitsetFilterCache.java | 4 +- .../index/cache/query/QueryCache.java | 4 +- .../cache/request/ShardRequestCache.java | 4 +- .../org/opensearch/index/engine/Engine.java | 55 +++++++++++++------ .../opensearch/index/engine/EngineConfig.java | 7 ++- .../index/engine/EngineException.java | 4 +- .../index/engine/EngineFactory.java | 5 +- .../index/engine/NRTReplicationEngine.java | 4 +- .../index/engine/SafeCommitInfo.java | 4 +- .../index/fielddata/DocValueBits.java | 5 +- .../index/fielddata/IndexFieldData.java | 13 ++++- .../index/fielddata/IndexFieldDataCache.java | 7 ++- .../index/fielddata/LeafFieldData.java | 4 +- .../index/fielddata/NumericDoubleValues.java | 4 +- .../index/fielddata/ShardFieldData.java | 4 +- .../fielddata/SortedNumericDoubleValues.java | 4 +- .../opensearch/index/get/ShardGetService.java | 4 +- .../opensearch/index/mapper/ContentPath.java | 5 +- .../index/mapper/DocValueFetcher.java | 4 +- .../index/mapper/DocumentMapper.java | 9 ++- .../index/mapper/DocumentMapperForType.java | 3 + .../index/mapper/DocumentMapperParser.java | 4 +- .../index/mapper/DynamicTemplate.java | 7 ++- .../opensearch/index/mapper/FieldMapper.java | 10 +++- .../index/mapper/FieldTypeLookup.java | 2 + .../index/mapper/IdFieldMapper.java | 4 +- .../index/mapper/IndexFieldMapper.java | 4 +- .../index/mapper/MappedFieldType.java | 7 ++- .../org/opensearch/index/mapper/Mapper.java | 16 ++++-- .../index/mapper/MapperParsingException.java | 4 +- .../index/mapper/MapperService.java | 5 +- .../org/opensearch/index/mapper/Mapping.java | 4 +- .../index/mapper/MappingLookup.java | 6 +- .../index/mapper/MetadataFieldMapper.java | 10 +++- .../opensearch/index/mapper/ObjectMapper.java | 10 +++- .../index/mapper/ParametrizedFieldMapper.java | 17 ++++-- .../opensearch/index/mapper/ParseContext.java | 7 ++- .../index/mapper/ParsedDocument.java | 4 +- .../index/mapper/RootObjectMapper.java | 4 +- .../index/mapper/RoutingFieldMapper.java | 4 +- .../index/mapper/SeqNoFieldMapper.java | 4 +- .../index/mapper/SourceFieldMapper.java | 4 +- .../index/mapper/SourceToParse.java | 4 +- .../index/mapper/TextSearchInfo.java | 7 ++- 
.../opensearch/index/mapper/ValueFetcher.java | 4 +- .../index/query/InnerHitBuilder.java | 4 ++ .../opensearch/index/query/ParsedQuery.java | 4 +- .../index/query/QueryShardContext.java | 4 +- .../opensearch/index/query/Rewriteable.java | 4 +- .../index/query/support/NestedScope.java | 4 +- .../RemoteStoreStatsTrackerFactory.java | 4 +- .../index/seqno/ReplicationTracker.java | 7 ++- .../index/seqno/RetentionLeaseSyncer.java | 2 + .../index/seqno/SequenceNumbers.java | 5 +- .../shard/GlobalCheckpointListeners.java | 4 +- .../IllegalIndexShardStateException.java | 4 +- .../index/shard/IndexEventListener.java | 4 +- .../opensearch/index/shard/IndexShard.java | 7 ++- .../shard/IndexShardClosedException.java | 4 +- .../IndexShardNotRecoveringException.java | 4 +- .../shard/IndexShardNotStartedException.java | 4 +- .../shard/IndexShardRecoveringException.java | 4 +- .../shard/IndexShardRelocatedException.java | 4 +- .../shard/IndexShardStartedException.java | 4 +- .../index/shard/IndexShardState.java | 5 +- .../shard/IndexingOperationListener.java | 4 +- .../index/shard/PrimaryReplicaSyncer.java | 7 ++- .../index/shard/ReplicationGroup.java | 4 +- .../index/shard/SearchOperationListener.java | 4 +- .../org/opensearch/index/shard/ShardPath.java | 4 +- .../index/similarity/SimilarityProvider.java | 4 +- .../index/similarity/SimilarityService.java | 4 +- .../snapshots/IndexShardSnapshotStatus.java | 11 +++- .../RemoteStoreShardShallowCopySnapshot.java | 4 +- .../stats/IndexingPressurePerShardStats.java | 4 +- .../index/stats/IndexingPressureStats.java | 4 +- .../stats/ShardIndexingPressureStats.java | 4 +- .../store/RemoteSegmentStoreDirectory.java | 7 ++- .../store/RemoteStoreFileDownloader.java | 6 +- .../org/opensearch/index/store/Store.java | 12 ++-- .../index/store/StoreFileMetadata.java | 4 +- .../index/store/lockmanager/LockInfo.java | 6 +- .../lockmanager/RemoteStoreLockManager.java | 6 +- .../RemoteStoreLockManagerFactory.java | 4 +- .../RemoteStoreMetadataLockManager.java | 4 +- .../remote/filecache/CachedIndexInput.java | 4 +- .../store/remote/filecache/FileCache.java | 4 +- .../metadata/RemoteSegmentMetadata.java | 4 +- .../store/remote/utils/cache/CacheUsage.java | 3 + .../remote/utils/cache/stats/CacheStats.java | 4 +- .../index/translog/BaseTranslogReader.java | 2 +- .../BufferedChecksumStreamOutput.java | 4 +- .../index/translog/ChannelFactory.java | 5 +- .../opensearch/index/translog/Checkpoint.java | 6 +- .../index/translog/TragicExceptionHolder.java | 4 +- .../opensearch/index/translog/Translog.java | 21 ++++--- .../index/translog/TranslogConfig.java | 4 +- .../translog/TranslogDeletionPolicy.java | 4 +- .../TranslogDeletionPolicyFactory.java | 2 + .../index/translog/TranslogException.java | 4 +- .../index/translog/TranslogFactory.java | 5 +- .../index/translog/TranslogManager.java | 5 +- .../index/translog/TranslogReader.java | 4 +- .../translog/TranslogRecoveryRunner.java | 5 +- .../index/translog/TranslogWriter.java | 6 +- .../index/warmer/ShardIndexWarmerService.java | 4 +- .../opensearch/indices/IndicesQueryCache.java | 4 +- .../opensearch/indices/IndicesService.java | 7 ++- .../opensearch/indices/NodeIndicesStats.java | 4 +- .../cluster/IndicesClusterStateService.java | 4 +- .../cache/IndicesFieldDataCache.java | 7 ++- .../indices/mapper/MapperRegistry.java | 4 +- .../recovery/PeerRecoveryTargetService.java | 4 +- .../indices/recovery/RecoveryListener.java | 2 + .../indices/recovery/RecoveryTarget.java | 4 +- .../recovery/StartRecoveryRequest.java | 4 +- 
.../checkpoint/PublishCheckpointAction.java | 5 +- .../checkpoint/ReplicationCheckpoint.java | 4 +- ...SegmentReplicationCheckpointPublisher.java | 7 ++- .../common/ReplicationFailedException.java | 4 +- .../replication/common/ReplicationState.java | 5 +- .../node/AdaptiveSelectionStats.java | 4 +- .../main/java/org/opensearch/node/Node.java | 19 +++---- .../node/ResponseCollectorService.java | 7 ++- .../opensearch/plugins/IndexStorePlugin.java | 8 +++ .../org/opensearch/repositories/IndexId.java | 4 +- .../IndexMetaDataGenerations.java | 4 +- .../opensearch/repositories/Repository.java | 7 ++- .../repositories/RepositoryData.java | 4 +- .../repositories/RepositoryInfo.java | 4 +- .../repositories/RepositoryShardId.java | 4 +- .../repositories/RepositoryStats.java | 4 +- .../repositories/RepositoryStatsSnapshot.java | 4 +- .../repositories/ShardGenerations.java | 7 ++- .../opensearch/script/AggregationScript.java | 7 ++- .../org/opensearch/script/FieldScript.java | 7 ++- .../org/opensearch/search/DocValueFormat.java | 4 +- .../org/opensearch/search/MultiValueMode.java | 4 +- .../org/opensearch/search/RescoreDocIds.java | 4 +- .../java/org/opensearch/search/SearchHit.java | 7 ++- .../org/opensearch/search/SearchHits.java | 4 +- .../opensearch/search/SearchPhaseResult.java | 4 +- .../opensearch/search/SearchShardTarget.java | 4 +- .../opensearch/search/SearchSortValues.java | 4 +- .../search/SearchSortValuesAndFormats.java | 4 +- .../search/aggregations/Aggregation.java | 4 +- .../aggregations/AggregationBuilder.java | 7 ++- .../aggregations/AggregationProcessor.java | 4 ++ .../search/aggregations/Aggregations.java | 4 +- .../search/aggregations/Aggregator.java | 8 ++- .../aggregations/AggregatorFactories.java | 7 ++- .../aggregations/AggregatorFactory.java | 4 +- .../BucketCollectorProcessor.java | 4 ++ .../aggregations/CardinalityUpperBound.java | 4 +- .../aggregations/InternalAggregation.java | 10 +++- .../aggregations/InternalAggregations.java | 4 +- .../MultiBucketConsumerService.java | 4 +- .../PipelineAggregationBuilder.java | 4 +- .../SearchContextAggregations.java | 5 +- .../bucket/LocalBucketCountThresholds.java | 4 +- .../bucket/terms/TermsAggregator.java | 4 +- .../pipeline/PipelineAggregator.java | 7 ++- .../aggregations/support/AggregationInfo.java | 4 +- .../aggregations/support/AggregationPath.java | 4 +- .../support/AggregationUsageService.java | 7 ++- .../aggregations/support/FieldContext.java | 4 +- .../aggregations/support/ValueType.java | 4 +- .../aggregations/support/ValuesSource.java | 4 +- .../support/ValuesSourceConfig.java | 4 +- .../support/ValuesSourceRegistry.java | 10 +++- .../support/ValuesSourceType.java | 4 +- .../search/builder/SearchSourceBuilder.java | 10 +++- .../opensearch/search/dfs/AggregatedDfs.java | 4 +- .../search/dfs/DfsSearchResult.java | 4 +- .../opensearch/search/fetch/FetchPhase.java | 4 +- .../search/fetch/FetchSearchResult.java | 4 +- .../search/fetch/StoredFieldsContext.java | 4 +- .../fetch/subphase/FetchDocValuesContext.java | 4 +- .../fetch/subphase/FetchFieldsContext.java | 5 +- .../search/fetch/subphase/FieldAndFormat.java | 4 +- .../fetch/subphase/InnerHitsContext.java | 7 ++- .../fetch/subphase/ScriptFieldsContext.java | 7 ++- .../subphase/highlight/HighlightField.java | 4 +- .../search/internal/ContextIndexSearcher.java | 4 +- .../internal/InternalSearchResponse.java | 4 +- .../search/internal/ReaderContext.java | 4 +- .../search/internal/ScrollContext.java | 4 +- .../search/internal/SearchContext.java | 4 +- 
.../search/internal/ShardSearchContextId.java | 4 +- .../search/internal/ShardSearchRequest.java | 4 +- .../opensearch/search/lookup/DocLookup.java | 4 +- .../search/lookup/LeafDocLookup.java | 4 +- .../search/lookup/LeafFieldsLookup.java | 4 +- .../search/lookup/LeafSearchLookup.java | 4 +- .../search/lookup/SearchLookup.java | 4 +- .../search/lookup/SourceLookup.java | 4 +- .../opensearch/search/profile/Profilers.java | 4 +- .../AggregationProfileBreakdown.java | 4 +- .../aggregation/AggregationProfiler.java | 4 +- .../query/InternalProfileComponent.java | 5 +- .../search/profile/query/QueryProfiler.java | 4 +- .../search/query/QueryCollectorContext.java | 4 +- .../opensearch/search/query/QueryPhase.java | 4 +- .../query/QueryPhaseExecutionException.java | 4 +- .../search/query/QueryPhaseSearcher.java | 4 +- .../search/query/QuerySearchResult.java | 4 +- .../search/query/ReduceableSearchResult.java | 5 +- .../search/rescore/RescoreContext.java | 4 +- .../opensearch/search/rescore/Rescorer.java | 4 +- .../search/rescore/RescorerBuilder.java | 4 +- .../opensearch/search/sort/BucketedSort.java | 16 ++++-- .../search/sort/NestedSortBuilder.java | 4 +- .../search/sort/SortAndFormats.java | 4 +- .../opensearch/search/sort/SortBuilder.java | 4 +- .../org/opensearch/search/sort/SortValue.java | 4 +- .../InternalSnapshotsInfoService.java | 5 +- .../snapshots/SnapshotShardSizeInfo.java | 4 +- .../org/opensearch/transport/Transport.java | 4 ++ .../transport/TransportException.java | 4 +- .../transport/TransportRequest.java | 4 +- .../transport/TransportRequestOptions.java | 10 +++- .../index/IndexServiceTestUtils.java | 20 +++++++ 353 files changed, 1404 insertions(+), 505 deletions(-) create mode 100644 server/src/test/java/org/opensearch/index/IndexServiceTestUtils.java diff --git a/libs/common/src/main/java/org/opensearch/common/Explicit.java b/libs/common/src/main/java/org/opensearch/common/Explicit.java index e060baf6f187e..da44c6fd4dcef 100644 --- a/libs/common/src/main/java/org/opensearch/common/Explicit.java +++ b/libs/common/src/main/java/org/opensearch/common/Explicit.java @@ -32,6 +32,8 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import java.util.Objects; /** @@ -43,8 +45,9 @@ * field mapping settings it is preferable to preserve an explicit * choice rather than a choice made only made implicitly by defaults. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Explicit { private final T value; diff --git a/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java b/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java index 30bea6185febc..dfc4fefb9ee55 100644 --- a/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java +++ b/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java @@ -32,13 +32,16 @@ package org.opensearch.common.lease; +import org.opensearch.common.annotation.PublicApi; + import java.io.Closeable; /** * Specialization of {@link AutoCloseable} for calls that might not throw a checked exception. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Releasable extends Closeable { @Override diff --git a/libs/core/src/main/java/org/opensearch/core/action/ActionResponse.java b/libs/core/src/main/java/org/opensearch/core/action/ActionResponse.java index 041d8b1bffb4a..7525bfb243aae 100644 --- a/libs/core/src/main/java/org/opensearch/core/action/ActionResponse.java +++ b/libs/core/src/main/java/org/opensearch/core/action/ActionResponse.java @@ -32,6 +32,7 @@ package org.opensearch.core.action; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.transport.TransportResponse; @@ -42,6 +43,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ActionResponse extends TransportResponse { public ActionResponse() {} diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java index af9df51655414..960f4bec5eeb5 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java @@ -32,6 +32,8 @@ package org.opensearch.core.common.io.stream; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -41,8 +43,9 @@ * across the wire" using OpenSearch's internal protocol. If the implementer also implements equals and hashCode then a copy made by * serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public interface Writeable { /** * A WriteableRegistry registers {@link Writer} methods for writing data types over a @@ -135,8 +138,11 @@ public static Class getCustomClassFromInstance(final Object value) { * out.writeMapOfLists(someMap, StreamOutput::writeString, StreamOutput::writeString); * } * + * + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "2.8.0") interface Writer { /** @@ -161,8 +167,11 @@ interface Writer { * this.someMap = in.readMapOfLists(StreamInput::readString, StreamInput::readString); * } * + * + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "2.8.0") interface Reader { /** diff --git a/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java b/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java index e50f24417f8bc..f4d81c4ca4363 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java +++ b/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java @@ -33,14 +33,16 @@ package org.opensearch.core.common.util; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import java.nio.ByteBuffer; /** * Abstraction of an array of byte values. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ByteArray extends BigArray { /** diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java index 3ce8b4953b9d6..992655efec8f0 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java @@ -32,6 +32,7 @@ package org.opensearch.core.indices.breaker; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -43,8 +44,9 @@ /** * Stats class encapsulating all of the different circuit breaker stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AllCircuitBreakerStats implements Writeable, ToXContentFragment { /** An array of all the circuit breaker stats */ diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java index ee9c94f432a36..dedeb0803271f 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java @@ -32,8 +32,7 @@ package org.opensearch.core.indices.breaker; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.core.common.breaker.CircuitBreaker; @@ -41,11 +40,10 @@ * Interface for Circuit Breaker services, which provide breakers to classes * that load field data. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class CircuitBreakerService extends AbstractLifecycleComponent { - private static final Logger logger = LogManager.getLogger(CircuitBreakerService.class); - protected CircuitBreakerService() {} /** diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java index 9207d3ea77227..ee71cf8d2ac0e 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java @@ -32,6 +32,7 @@ package org.opensearch.core.indices.breaker; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -45,8 +46,9 @@ /** * Class encapsulating stats about the {@link org.opensearch.core.common.breaker.CircuitBreaker} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CircuitBreakerStats implements Writeable, ToXContentObject { /** The name of the circuit breaker */ diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java index 365b36c755dd2..04d0bce27c04f 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java @@ -32,6 +32,7 @@ package org.opensearch.core.xcontent; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import java.io.IOException; @@ -83,8 +84,9 @@ * It's highly recommended to use the high level declare methods like {@link #declareString(BiConsumer, ParseField)} instead of * {@link #declareField} which can be used to implement exceptional parsing operations not covered by the high level methods. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ObjectParser extends AbstractObjectParser implements BiFunction, diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java index f19c9db7874db..6b6a96dc42a84 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java @@ -14,7 +14,6 @@ import org.opensearch.action.admin.indices.flush.FlushResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractAsyncTask; import org.opensearch.common.util.concurrent.UncategorizedExecutionException; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; @@ -22,6 +21,7 @@ import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; +import org.opensearch.index.IndexServiceTestUtils; import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; @@ -175,7 +175,7 @@ public void testAsyncTrimTaskSucceeds() { logger.info("Increasing the frequency of async trim task to ensure it runs in background while indexing"); IndexService indexService = internalCluster().getInstance(IndicesService.class, dataNodeName).iterator().next(); - ((AbstractAsyncTask) indexService.getTrimTranslogTask()).setInterval(TimeValue.timeValueMillis(100)); + IndexServiceTestUtils.setTrimTranslogTaskInterval(indexService, TimeValue.timeValueMillis(100)); logger.info("--> Indexing data"); indexData(randomIntBetween(2, 5), true); diff --git a/server/src/main/java/org/opensearch/action/AliasesRequest.java b/server/src/main/java/org/opensearch/action/AliasesRequest.java index 3632ba2d7304f..8fe6f49c08af0 100644 --- a/server/src/main/java/org/opensearch/action/AliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/AliasesRequest.java @@ -32,14 +32,17 @@ package org.opensearch.action; +import org.opensearch.common.annotation.PublicApi; + /** * Needs to be implemented by all {@link org.opensearch.action.ActionRequest} subclasses that relate to * one or more indices and one or more aliases. Meant to be used for aliases management requests (e.g. add/remove alias, * get aliases) that hold aliases and indices in separate fields. * Allows to retrieve which indices and aliases the action relates to. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface AliasesRequest extends IndicesRequest.Replaceable { /** diff --git a/server/src/main/java/org/opensearch/action/OriginalIndices.java b/server/src/main/java/org/opensearch/action/OriginalIndices.java index 1e24c64bc60fc..1c26bf9d416f5 100644 --- a/server/src/main/java/org/opensearch/action/OriginalIndices.java +++ b/server/src/main/java/org/opensearch/action/OriginalIndices.java @@ -33,6 +33,7 @@ package org.opensearch.action; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -42,8 +43,9 @@ /** * Used to keep track of original indices within internal (e.g. shard level) requests * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class OriginalIndices implements IndicesRequest { // constant to use when original indices are not applicable and will not be serialized across the wire diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java index f4705a21f5014..3c8f07613561d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.routing.allocation.AllocationDecision; import org.opensearch.cluster.routing.allocation.ShardAllocationDecision; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -58,8 +59,9 @@ * or if it is not unassigned, then which nodes it could possibly be relocated to. * It is an immutable class. 
 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ClusterAllocationExplanation implements ToXContentObject, Writeable { private final ShardRouting shardRouting; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 0b101070db478..e0f380b3ebbe6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.SnapshotsInProgress.State; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -68,8 +69,9 @@ /** * Status of a snapshot * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotStatus implements ToXContentObject, Writeable { private final Snapshot snapshot; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java index 475e44e1820d5..75c68350e2204 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -52,8 +53,9 @@ /** * Class for holding Rollover related information within an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RolloverInfo extends AbstractDiffable<RolloverInfo> implements Writeable, ToXContentFragment { public static final ParseField CONDITION_FIELD = new ParseField("met_conditions"); diff --git a/server/src/main/java/org/opensearch/action/search/ParsedScrollId.java b/server/src/main/java/org/opensearch/action/search/ParsedScrollId.java index 70be9a0b19e08..b723b97b5c413 100644 --- a/server/src/main/java/org/opensearch/action/search/ParsedScrollId.java +++ b/server/src/main/java/org/opensearch/action/search/ParsedScrollId.java @@ -32,13 +32,16 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; + import java.util.Arrays; /** * Search scroll id that has been parsed * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ParsedScrollId { public static final String QUERY_THEN_FETCH_TYPE = "queryThenFetch"; diff --git a/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java b/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java index 59c0e54bb6cbc..7c702c16030b7 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java +++ b/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java @@ -33,6 +33,7 @@ package org.opensearch.action.search; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -43,8 +44,9 @@ /** * Id for a search context per node. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SearchContextIdForNode implements Writeable { private final String node; private final ShardSearchContextId searchContextId; diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhase.java b/server/src/main/java/org/opensearch/action/search/SearchPhase.java index 1c7b3c1f1563c..0890e9f5de8d4 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhase.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; import java.util.Locale; @@ -40,8 +41,9 @@ /** * Base class for all individual search phases like collecting distributed frequencies, fetching documents, querying shards. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class SearchPhase implements CheckedRunnable<IOException> { private final String name; private long startTimeInNanos; diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java index 45d39a6f85ea2..0fa8569413eaf 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java @@ -34,6 +34,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.OriginalIndices; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.search.SearchPhaseResult; @@ -48,8 +49,9 @@ /** * This class provide contextual state and access to resources across multiple search phases.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface SearchPhaseContext extends Executor { // TODO maybe we can make this concrete later - for now we just implement this in the base class for all initial phases diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java index 89d725b56bded..ecf48b4a9f633 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java @@ -10,14 +10,16 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.annotation.PublicApi; import java.util.List; /** * A listener for search, fetch and context events at the coordinator node level * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface SearchRequestOperationsListener { void onPhaseStart(SearchPhaseContext context); diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java b/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java index fbd85a3fc0b8f..33d9bf70021a0 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.routing.PlainShardIterator; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.Countable; import org.opensearch.common.util.PlainIterator; @@ -54,8 +55,9 @@ * the cluster alias. * @see OriginalIndices * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SearchShardIterator implements Comparable, Countable { private final OriginalIndices originalIndices; diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java index 54faaf363cb70..dfecf4f462c4d 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java @@ -33,6 +33,7 @@ package org.opensearch.action.search; import org.opensearch.common.MemoizedSupplier; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.tasks.TaskId; import org.opensearch.search.fetch.ShardFetchSearchRequest; import org.opensearch.search.internal.ShardSearchRequest; @@ -46,8 +47,9 @@ * Task storing information about a currently running search shard request. * See {@link ShardSearchRequest}, {@link ShardFetchSearchRequest}, ... 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchShardTask extends CancellableTask implements SearchBackpressureTask { // generating metadata in a lazy way since source can be quite big private final MemoizedSupplier metadataSupplier; diff --git a/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java b/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java index 4d3f9b71a919c..ef490844db692 100644 --- a/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java +++ b/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java @@ -37,6 +37,7 @@ import org.opensearch.action.OriginalIndices; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -56,8 +57,9 @@ /** * Represents a failure to search on a specific shard. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardSearchFailure extends ShardOperationFailedException { private static final String REASON_FIELD = "reason"; diff --git a/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java b/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java index 15275ba48fc6e..e91342a7ce4b8 100644 --- a/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java +++ b/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -48,8 +49,9 @@ * A class whose instances represent a value for counting the number * of active shard copies for a given shard in an index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ActiveShardCount implements Writeable { private static final int ACTIVE_SHARD_COUNT_DEFAULT = -2; diff --git a/server/src/main/java/org/opensearch/action/support/RetryableAction.java b/server/src/main/java/org/opensearch/action/support/RetryableAction.java index fc2ee277b538a..d555a876f20fb 100644 --- a/server/src/main/java/org/opensearch/action/support/RetryableAction.java +++ b/server/src/main/java/org/opensearch/action/support/RetryableAction.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionRunnable; import org.opensearch.action.bulk.BackoffPolicy; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; @@ -52,8 +53,9 @@ * default. The action will be retried with exponentially increasing delay periods until the timeout period * has been reached. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class RetryableAction { private final Logger logger; diff --git a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java index c52bc6c2c799d..bb6bfc6fd4773 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java +++ b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java @@ -33,6 +33,7 @@ package org.opensearch.action.support.replication; import org.opensearch.action.support.RetryableAction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.index.shard.ShardId; @@ -51,8 +52,9 @@ /** * Pending Replication Actions * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PendingReplicationActions implements Consumer, Releasable { private final Map>> onGoingReplicationActions = ConcurrentCollections.newConcurrentMap(); diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java index f9b85cc4bd7aa..dd3a38c1398ab 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java @@ -8,11 +8,14 @@ package org.opensearch.action.support.replication; +import org.opensearch.common.annotation.PublicApi; + /** * The type of replication used for inter-node replication. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public enum ReplicationMode { /** * In this mode, a {@code TransportReplicationAction} is fanned out to underlying concerned shard and is replicated logically. diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java index 825405cb68549..67480976c500f 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java @@ -36,6 +36,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; @@ -57,8 +58,9 @@ /** * Base class for write action responses. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ReplicationResponse extends ActionResponse { public static final ReplicationResponse.ShardInfo.Failure[] EMPTY = new ReplicationResponse.ShardInfo.Failure[0]; @@ -88,8 +90,9 @@ public void setShardInfo(ShardInfo shardInfo) { /** * Holds shard information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ShardInfo implements Writeable, ToXContentObject { private static final String TOTAL = "total"; @@ -235,8 +238,9 @@ public String toString() { /** * Holds failure information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure extends ShardOperationFailedException implements ToXContentObject { private static final String _INDEX = "_index"; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterState.java b/server/src/main/java/org/opensearch/cluster/ClusterState.java index 1b87a60c2ccf5..9e63f961d241d 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterState.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterState.java @@ -49,6 +49,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; @@ -97,8 +98,9 @@ * throws the {@link IncompatibleClusterStateVersionException}, which causes the publishing mechanism to send * a full version of the cluster state to the node on which this exception was thrown. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterState implements ToXContentFragment, Diffable { public static final ClusterState EMPTY_STATE = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build(); @@ -139,8 +141,9 @@ static boolean shouldSerializ /** * Custom cluster state. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Custom extends NamedDiffable, ToXContentFragment, FeatureAware { /** @@ -596,8 +599,9 @@ public static Builder builder(ClusterState state) { /** * Builder for cluster state. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final ClusterName clusterName; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java b/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java index 140e6426bb801..d7702729ae884 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java @@ -33,13 +33,15 @@ package org.opensearch.cluster; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.PublicApi; /** * A component that is in charge of applying an incoming cluster state to the node internal data structures. * The single apply method is called before the cluster state becomes visible via {@link ClusterService#state()}. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateApplier { /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java index 01a8e51a3d13e..57fcfcad099ed 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java @@ -32,11 +32,14 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; + /** * A listener to be notified when a cluster state changes. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java index 9a4b708548a7d..149d93a158007 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java @@ -33,13 +33,15 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Cluster state update task configuration for timeout and priority * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateTaskConfig { /** * The timeout for this cluster state update task configuration. If diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java index bf8494cc36857..115816798959e 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java @@ -33,6 +33,7 @@ import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import java.util.IdentityHashMap; import java.util.List; @@ -41,8 +42,9 @@ /** * Interface that updates the cluster state based on the task * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateTaskExecutor { /** * Update the cluster state based on the current state and the given tasks. Return the *same instance* if no state @@ -103,8 +105,9 @@ default ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey * Represents the result of a batched execution of cluster state update tasks * @param the type of the cluster state update task * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") class ClusterTasksResult { @Nullable public final ClusterState resultingState; @@ -127,8 +130,9 @@ public static Builder builder() { /** * Builder for cluster state task. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final Map executionResults = new IdentityHashMap<>(); diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java index d6c4abfad7b8d..0cb24bd3f3eab 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java @@ -32,14 +32,16 @@ package org.opensearch.cluster; import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.common.annotation.PublicApi; import java.util.List; /** * Interface to implement a cluster state change listener * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateTaskListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java index 9225914a931b2..ae6626dd4785d 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java @@ -34,6 +34,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import java.util.List; @@ -41,8 +42,9 @@ /** * A task that can update the cluster state. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig, diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java index c07dcc5daaee6..0b2ed7ef66d43 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java @@ -31,12 +31,15 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; + /** * Enables listening to cluster-manager changes events of the local node (when the local node becomes the cluster-manager, and when the local * node cease being a cluster-manager). * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface LocalNodeClusterManagerListener extends ClusterStateListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java index 31c0b294b8004..2487aaf0d7c51 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java @@ -31,14 +31,17 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.DeprecatedApi; + /** * Enables listening to cluster-manager changes events of the local node (when the local node becomes the cluster-manager, and when the local * node cease being a cluster-manager). 
* - * @opensearch.internal + * @opensearch.api * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link LocalNodeClusterManagerListener} */ @Deprecated +@DeprecatedApi(since = "2.2.0") public interface LocalNodeMasterListener extends LocalNodeClusterManagerListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/NamedDiff.java b/server/src/main/java/org/opensearch/cluster/NamedDiff.java index ce971aa723394..e994cfb224386 100644 --- a/server/src/main/java/org/opensearch/cluster/NamedDiff.java +++ b/server/src/main/java/org/opensearch/cluster/NamedDiff.java @@ -33,13 +33,15 @@ package org.opensearch.cluster; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.NamedWriteable; /** * Diff that also support NamedWriteable interface * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface NamedDiff> extends Diff, NamedWriteable { /** * The minimal version of the recipient this custom object can be sent to diff --git a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java index 5b7bb19a935b1..1c12c260b3929 100644 --- a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java +++ b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java @@ -43,6 +43,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Setting; @@ -81,8 +82,9 @@ * This component does not block on disconnections at all, because a disconnection might need to wait for an ongoing (background) connection * attempt to complete first. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodeConnectionsService extends AbstractLifecycleComponent { private static final Logger logger = LogManager.getLogger(NodeConnectionsService.class); diff --git a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java index 3e0c78099e6b4..769f97373f7b7 100644 --- a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState.Custom; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -54,8 +55,9 @@ /** * Meta data about restore processes that are currently executing * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestoreInProgress extends AbstractNamedDiffable implements Custom, Iterable { /** @@ -139,8 +141,9 @@ public RestoreInProgress build() { /** * Restore metadata * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Entry { private final String uuid; private final State state; @@ -238,8 +241,9 @@ public int hashCode() { /** * Represents status of a restored shard * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ShardRestoreStatus implements Writeable { private State state; private String nodeId; @@ -363,8 +367,9 @@ public int hashCode() { /** * Shard restore process state * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum State { /** * Initializing state diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index 952591f346536..3de23d2490c63 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState.Custom; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -928,8 +929,9 @@ public String toString() { /** * State of the snapshots. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum State { INIT((byte) 0, false), STARTED((byte) 1, false), diff --git a/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java b/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java index eb31fa2b7e69d..f0fa5af64d157 100644 --- a/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java +++ b/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java @@ -32,13 +32,15 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * An exception to cluster state listener that allows for timeouts and for post added notifications. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TimeoutClusterStateListener extends ClusterStateListener { void postAdded(); diff --git a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java index db26496c6f263..837bf8af449ad 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java +++ b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java @@ -32,11 +32,14 @@ package org.opensearch.cluster.ack; +import org.opensearch.common.annotation.PublicApi; + /** * Base response returned after a cluster state update * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterStateUpdateResponse { private final boolean acknowledged; diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java index 4673f075e8439..ea6c0c69c523d 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.block; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.rest.RestStatus; @@ -49,8 +50,9 @@ /** * Internal exception on obtaining a cluster block * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterBlockException extends OpenSearchException { private final Set blocks; diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java index bcb4f7f71268e..304136166d515 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MetadataIndexStateService; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -59,8 +60,9 @@ /** * Represents current cluster level blocks to block dirty operations done against the cluster. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterBlocks extends AbstractDiffable { public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(emptySet(), Map.of()); @@ -355,8 +357,9 @@ public static Builder builder() { /** * Builder for cluster blocks. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final Set global = new HashSet<>(); diff --git a/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java b/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java index 729d76c72e99e..3e9fa3e23a12f 100644 --- a/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java +++ b/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.block; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -16,8 +17,9 @@ /** * Internal exception on obtaining an index create block * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexCreateBlockException extends ClusterBlockException { public IndexCreateBlockException(Set globalLevelBlocks) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java index 3a506397bcac8..0ef8262a216ee 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java @@ -34,14 +34,16 @@ import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; /** * Publishes the cluster state * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStatePublisher { /** * Publish all the changes to the cluster from the cluster-manager (can be called just by the cluster-manager). The publish @@ -59,8 +61,9 @@ public interface ClusterStatePublisher { /** * An acknowledgement listener. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface AckListener { /** * Should be called when the cluster coordination layer has committed the cluster state (i.e. even if this publication fails, diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java index 869c86ae75b90..53398d6f3f98f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.coordination; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.set.Sets; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; @@ -55,8 +56,9 @@ /** * Metadata for cluster coordination * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CoordinationMetadata implements Writeable, ToXContentFragment { public static final CoordinationMetadata EMPTY_METADATA = builder().build(); @@ -214,8 +216,9 @@ public String toString() { /** * Builder for coordination metadata. 
 * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private long term = 0; private VotingConfiguration lastCommittedConfiguration = VotingConfiguration.EMPTY_CONFIG; @@ -266,8 +269,9 @@ public CoordinationMetadata build() { /** * Excluded nodes from voting config. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class VotingConfigExclusion implements Writeable, ToXContentFragment { public static final String MISSING_VALUE_MARKER = "_absent_"; private final String nodeId; @@ -362,8 +366,9 @@ public String toString() { /** * A collection of persistent node ids, denoting the voting configuration for cluster state changes. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class VotingConfiguration implements Writeable, ToXContentFragment { public static final VotingConfiguration EMPTY_CONFIG = new VotingConfiguration(Collections.emptySet()); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java index 4d466c4b3ad73..0b7ed4fee5775 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.coordination; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -22,8 +23,9 @@ /** * Persisted cluster state related stats. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.12.0") public class PersistedStateStats implements Writeable, ToXContentObject { private final String statsName; private AtomicLong totalTimeInMillis = new AtomicLong(0); diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java index 3df807a4f94d3..254d70b0422d7 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java @@ -14,6 +14,7 @@ import org.opensearch.cluster.NamedDiff; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.Metadata.Custom; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -30,8 +31,9 @@ /** * Contains metadata about decommission attribute * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DecommissionAttributeMetadata extends AbstractNamedDiffable<Custom> implements Custom { public static final String TYPE = "decommissionedAttribute"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java index cab1b75afe3e1..abc3712ee07e3 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff;
import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -55,8 +56,9 @@ * contains a field "foo", it's expected to contain all the necessary settings/mappings/etc for the * "foo" field. These component templates make up the individual pieces composing an index template. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ComponentTemplate extends AbstractDiffable<ComponentTemplate> implements ToXContentObject { private static final ParseField TEMPLATE = new ParseField("template"); private static final ParseField VERSION = new ParseField("version"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java index 5f1291f6c6d82..e7f1b97f28842 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.Diff; import org.opensearch.cluster.metadata.DataStream.TimestampField; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -63,8 +64,9 @@ * ids corresponding to component templates that should be composed in order when creating a new * index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ComposableIndexTemplate extends AbstractDiffable<ComposableIndexTemplate> implements ToXContentObject { private static final ParseField INDEX_PATTERNS = new ParseField("index_patterns"); private static final ParseField TEMPLATE = new ParseField("template"); @@ -284,8 +286,9 @@ public String toString() { /** * Template for data stream.
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class DataStreamTemplate implements Writeable, ToXContentObject { private static final ParseField TIMESTAMP_FIELD_FIELD = new ParseField("timestamp_field"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java b/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java index 8209c7bb56ad8..a8102182576ff 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.Diff; import org.opensearch.cluster.Diffable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,8 +51,9 @@ * This is a {@code Map} that implements AbstractDiffable so it * can be used for cluster state purposes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DiffableStringMap extends AbstractMap implements Diffable { public static final DiffableStringMap EMPTY = new DiffableStringMap(Collections.emptyMap()); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java index 0c316373e484f..0f3ee894a7f63 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java @@ -33,6 +33,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.core.common.Strings; @@ -52,8 +53,9 @@ * An index abstraction has a unique name and encapsulates all the {@link IndexMetadata} instances it is pointing to. * Also depending on type it may refer to a single or many concrete indices and may or may not have a write index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndexAbstraction { /** @@ -102,7 +104,10 @@ public interface IndexAbstraction { /** * An index abstraction type. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") enum Type { /** @@ -335,8 +340,9 @@ private boolean isNonEmpty(List idxMetas) { /** * A data stream. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") class DataStream implements IndexAbstraction { private final org.opensearch.cluster.metadata.DataStream dataStream; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java index 85a203e5e059a..b4f8b6b188531 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.cluster.Diff; import org.opensearch.cluster.NamedDiff; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; @@ -68,8 +69,9 @@ * tombstones remain in the cluster state for a fixed period of time, after which * they are purged. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class IndexGraveyard implements Metadata.Custom { /** @@ -191,8 +193,9 @@ public static IndexGraveyard.Builder builder(final IndexGraveyard graveyard) { /** * A class to build an IndexGraveyard. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class Builder { private List tombstones; private int numPurged = -1; @@ -367,8 +370,9 @@ public String getWriteableName() { /** * An individual tombstone entry for representing a deleted index. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class Tombstone implements ToXContentObject, Writeable { private static final String INDEX_KEY = "index"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 2e1421c278879..03784df509ed6 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -97,8 +97,9 @@ /** * Index metadata information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexMetadata implements Diffable, ToXContentFragment { public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock( @@ -160,8 +161,9 @@ public class IndexMetadata implements Diffable, ToXContentFragmen /** * The state of the index. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum State { OPEN((byte) 0), CLOSE((byte) 1); @@ -1211,8 +1213,9 @@ public static Builder builder(IndexMetadata indexMetadata) { /** * Builder of index metadata. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private String index; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 70c1d059a1b9e..1871ed24973c2 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -49,6 +49,7 @@ import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; @@ -98,8 +99,9 @@ /** * Metadata information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Metadata implements Iterable, Diffable, ToXContentFragment { private static final Logger logger = LogManager.getLogger(Metadata.class); @@ -127,8 +129,9 @@ public boolean isSegmentReplicationEnabled(String indexName) { /** * Context of the XContent. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum XContentContext { /* Custom metadata should be returns as part of API call */ API, @@ -166,8 +169,9 @@ public enum XContentContext { /** * Custom metadata. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Custom extends NamedDiffable, ToXContentFragment, ClusterState.FeatureAware { EnumSet context(); @@ -1129,8 +1133,9 @@ public static Builder builder(Metadata metadata) { /** * Builder of metadata. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private String clusterUUID; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Template.java b/server/src/main/java/org/opensearch/cluster/metadata/Template.java index 8e367c71ed166..bd110c6af8975 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Template.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentHelper; @@ -58,8 +59,9 @@ * it is entirely independent from an index. It's a building block forming part of a regular index * template and a {@link ComponentTemplate}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Template extends AbstractDiffable