From ac9147b5a45f43541decb43f5d8cb27c16e35cd9 Mon Sep 17 00:00:00 2001
From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com>
Date: Mon, 11 Mar 2024 16:38:36 -0700
Subject: [PATCH 01/11] Bump org.codehaus.woodstox:stax2-api from 4.2.1 to
 4.2.2 in /plugins/repository-azure (#12579) (#12598)

* Bump org.codehaus.woodstox:stax2-api in /plugins/repository-azure

Bumps [org.codehaus.woodstox:stax2-api](https://github.com/FasterXML/stax2-api) from 4.2.1 to 4.2.2.
- [Commits](https://github.com/FasterXML/stax2-api/compare/stax2-api-4.2.1...stax2-api-4.2.2)

---
updated-dependencies:
- dependency-name: org.codehaus.woodstox:stax2-api
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

* Updating SHAs

* Update changelog

---------

(cherry picked from commit 246557c4b5ee71187f2dc98ebfa93409a187037e)
Signed-off-by: dependabot[bot]
Signed-off-by: github-actions[bot]
Co-authored-by: github-actions[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
 CHANGELOG.md                                               | 1 +
 plugins/repository-azure/build.gradle                      | 2 +-
 plugins/repository-azure/licenses/stax2-api-4.2.1.jar.sha1 | 1 -
 plugins/repository-azure/licenses/stax2-api-4.2.2.jar.sha1 | 1 +
 4 files changed, 3 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/repository-azure/licenses/stax2-api-4.2.1.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/stax2-api-4.2.2.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b9443c362176f..8e8358908e853 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -34,6 +34,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `ch.qos.logback:logback-core` from 1.2.13 to 1.5.3 ([#12519](https://github.com/opensearch-project/OpenSearch/pull/12519))
 - Bump `codecov/codecov-action` from 3 to 4 ([#12585](https://github.com/opensearch-project/OpenSearch/pull/12585))
 - Bump `org.apache.zookeeper:zookeeper` from 3.9.1 to 3.9.2 ([#12580](https://github.com/opensearch-project/OpenSearch/pull/12580))
+- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#12579](https://github.com/opensearch-project/OpenSearch/pull/12579))
 
 ### Changed
 - Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499))
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index 1425b73232cf4..c3f9a26c33cdc 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -64,7 +64,7 @@ dependencies {
   api "com.fasterxml.jackson.datatype:jackson-datatype-jsr310:${versions.jackson}"
   api "com.fasterxml.jackson.dataformat:jackson-dataformat-xml:${versions.jackson}"
   api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}"
-  api 'org.codehaus.woodstox:stax2-api:4.2.1'
+  api 'org.codehaus.woodstox:stax2-api:4.2.2'
   implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}"
   runtimeOnly "com.google.guava:guava:${versions.guava}"
   api "org.apache.commons:commons-lang3:${versions.commonslang}"
diff --git a/plugins/repository-azure/licenses/stax2-api-4.2.1.jar.sha1 b/plugins/repository-azure/licenses/stax2-api-4.2.1.jar.sha1
deleted file mode 100644
index 2c12704cdc560..0000000000000
--- a/plugins/repository-azure/licenses/stax2-api-4.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a3f7325c52240418c2ba257b103c3c550e140c83
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/stax2-api-4.2.2.jar.sha1 b/plugins/repository-azure/licenses/stax2-api-4.2.2.jar.sha1
new file mode 100644
index 0000000000000..b15a7ead0d016
--- /dev/null
+++ b/plugins/repository-azure/licenses/stax2-api-4.2.2.jar.sha1
@@ -0,0 +1 @@
+b0d746cadea928e5264f2ea294ea9a1bf815bbde
\ No newline at end of file

From 30b7a95f5b7f6e7b5fefb97b858236eebdf25411 Mon Sep 17 00:00:00 2001
From: kkewwei
Date: Tue, 12 Mar 2024 20:42:59 +0800
Subject: [PATCH 02/11] [Backport 2.x] onShardResult and onShardFailure are
 executed on one shard causes opensearch jvm crashed (#12612)

* onShardResult and onShardFailure are executed on one shard causes opensearch jvm crashed

Signed-off-by: kkewwei

* unit test

Signed-off-by: kkewwei

* spotlessJavaCheck

Signed-off-by: kkewwei

* rename variable names

Signed-off-by: kkewwei

* add changelog

Signed-off-by: kkewwei

---------

Signed-off-by: kkewwei
---
 CHANGELOG.md                                  |  1 +
 .../search/AbstractSearchAsyncAction.java     |  8 +++++-
 .../AbstractSearchAsyncActionTests.java       | 25 +++++++++++++++++++
 3 files changed, 31 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8e8358908e853..fc885993551db 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -52,6 +52,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix `terms` query on `float` field when `doc_values` are turned off by reverting back to `FloatPoint` from `FloatField` ([#12499](https://github.com/opensearch-project/OpenSearch/pull/12499))
 - Fix get task API does not refresh resource stats ([#11531](https://github.com/opensearch-project/OpenSearch/pull/11531))
 - Fix for deserilization bug in weighted round-robin metadata ([#11679](https://github.com/opensearch-project/OpenSearch/pull/11679))
+- onShardResult and onShardFailure are executed on one shard causes opensearch jvm crashed ([#12158](https://github.com/opensearch-project/OpenSearch/pull/12158))
 
 ### Security
diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java
index 9e1d065c96dd6..3b99ea9cc1cd2 100644
--- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java
+++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java
@@ -286,6 +286,7 @@ private void performPhaseOnShard(final int shardIndex, final SearchShardIterator
         Runnable r = () -> {
             final Thread thread = Thread.currentThread();
             try {
+                final SearchPhase phase = this;
                 executePhaseOnShard(shardIt, shard, new SearchActionListener<Result>(shard, shardIndex) {
                     @Override
                     public void innerOnResponse(Result result) {
@@ -299,7 +300,12 @@ public void innerOnResponse(Result result) {
                     @Override
                     public void onFailure(Exception t) {
                         try {
-                            onShardFailure(shardIndex, shard, shardIt, t);
+                            // It only happens when onPhaseDone() is called and executePhaseOnShard() fails hard with an exception.
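+                            // (Explanatory note: at this point totalOps has already reached expectedTotalOps,
+                            // so routing the failure through onShardFailure would count the same shard twice
+                            // and trip the total-ops assertion that crashed the JVM; failing the whole phase
+                            // instead keeps the counters consistent.)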
+                            if (totalOps.get() == expectedTotalOps) {
+                                onPhaseFailure(phase, "The phase has failed", t);
+                            } else {
+                                onShardFailure(shardIndex, shard, shardIt, t);
+                            }
                         } finally {
                             executeNext(pendingExecutions, thread);
                         }
diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java
index 601aa9dc1856e..3af7af114e96d 100644
--- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java
+++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java
@@ -151,6 +151,7 @@ private AbstractSearchAsyncAction<SearchPhaseResult> createAction(
             listener,
             controlled,
             false,
+            false,
             expected,
             new SearchShardIterator(null, null, Collections.emptyList(), null)
         );
@@ -162,6 +163,7 @@ private AbstractSearchAsyncAction<SearchPhaseResult> createAction(
         ActionListener<SearchResponse> listener,
         final boolean controlled,
         final boolean failExecutePhaseOnShard,
+        final boolean catchExceptionWhenExecutePhaseOnShard,
         final AtomicLong expected,
         final SearchShardIterator... shards
     ) {
@@ -221,7 +223,15 @@ protected void executePhaseOnShard(
                 if (failExecutePhaseOnShard) {
                     listener.onFailure(new ShardNotFoundException(shardIt.shardId()));
                 } else {
-                    listener.onResponse(new QuerySearchResult());
+                    if (catchExceptionWhenExecutePhaseOnShard) {
+                        try {
+                            listener.onResponse(new QuerySearchResult());
+                        } catch (Exception e) {
+                            listener.onFailure(e);
+                        }
+                    } else {
+                        listener.onResponse(new QuerySearchResult());
+                    }
                 }
             }
@@ -509,6 +519,7 @@ public void onFailure(Exception e) {
             },
             false,
             true,
+            false,
             new AtomicLong(),
             shards
         );
@@ -555,6 +566,7 @@ public void onFailure(Exception e) {
             },
             false,
             false,
+            false,
             new AtomicLong(),
             shards
         );
@@ -570,7 +582,7 @@ public void onFailure(Exception e) {
         assertThat(searchResponse.getSuccessfulShards(), equalTo(shards.length));
     }
 
-    public void testExecutePhaseOnShardFailure() throws InterruptedException {
+    private void innerTestExecutePhaseOnShardFailure(boolean catchExceptionWhenExecutePhaseOnShard) throws InterruptedException {
         final Index index = new Index("test", UUID.randomUUID().toString());
 
         final SearchShardIterator[] shards = IntStream.range(0, 2 + randomInt(3))
@@ -606,6 +618,7 @@ public void onFailure(Exception e) {
             },
             false,
             false,
+            catchExceptionWhenExecutePhaseOnShard,
             new AtomicLong(),
             shards
         );
@@ -621,6 +634,14 @@ public void onFailure(Exception e) {
         assertThat(searchResponse.getSuccessfulShards(), equalTo(shards.length));
     }
 
+    public void testExecutePhaseOnShardFailure() throws InterruptedException {
+        innerTestExecutePhaseOnShardFailure(false);
+    }
+
+    public void testExecutePhaseOnShardFailureAndThrowException() throws InterruptedException {
+        innerTestExecutePhaseOnShardFailure(true);
+    }
+
     public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedException {
         ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
         SearchRequestStats testListener = new SearchRequestStats(clusterSettings);

From 4e40668a1e2de71b0dddc8ae9436f98a5957770b Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Tue, 12 Mar 2024 10:54:28 -0400
Subject: [PATCH 03/11] The org.opensearch.bootstrap.Security should support
 codebase for JAR files with classifiers (#12586) (#12613)

Signed-off-by: Andriy Redko
(cherry picked from commit 07e79e3bec7732cc347bf36d934631e6f87a8216)
---
 CHANGELOG.md                                  |  1 +
 .../org/opensearch/bootstrap/Security.java    | 71 ++++++++++++++-----
 .../opensearch/bootstrap/SecurityTests.java   | 23 ++++++
 .../bootstrap/test-codebases.policy           | 25 +++++++
 .../org/opensearch/bootstrap/test.policy      | 13 ++++
 5 files changed, 116 insertions(+), 17 deletions(-)
 create mode 100644 server/src/test/resources/org/opensearch/bootstrap/test-codebases.policy
 create mode 100644 server/src/test/resources/org/opensearch/bootstrap/test.policy

diff --git a/CHANGELOG.md b/CHANGELOG.md
index fc885993551db..94e8638ef023c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add shard id property to SearchLookup for use in field types provided by plugins ([#1063](https://github.com/opensearch-project/OpenSearch/pull/1063))
 - Add kuromoji_completion analyzer and filter ([#4835](https://github.com/opensearch-project/OpenSearch/issues/4835))
 - [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880))
+- The org.opensearch.bootstrap.Security should support codebase for JAR files with classifiers ([#12586](https://github.com/opensearch-project/OpenSearch/issues/12586))
 
 ### Dependencies
 - Bump `com.squareup.okio:okio` from 3.7.0 to 3.8.0 ([#12290](https://github.com/opensearch-project/OpenSearch/pull/12290))
diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java
index 3e4fc134fa5c6..0e8559578b916 100644
--- a/server/src/main/java/org/opensearch/bootstrap/Security.java
+++ b/server/src/main/java/org/opensearch/bootstrap/Security.java
@@ -66,6 +66,8 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import static org.opensearch.bootstrap.FilePermissionUtils.addDirectoryPath;
 import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath;
@@ -121,6 +123,8 @@
  */
 @SuppressWarnings("removal")
 final class Security {
+    private static final Pattern CODEBASE_JAR_WITH_CLASSIFIER = Pattern.compile("^(.+)-\\d+\\.\\d+[^-]*.*?[-]?([^-]+)?\\.jar$");
+
     /** no instantiation */
     private Security() {}
 
@@ -231,33 +235,45 @@ static Policy readPolicy(URL policyFile, Map<String, URL> codebases) {
         try {
             List<String> propertiesSet = new ArrayList<>();
             try {
+                final Map<Map.Entry<String, URL>, String> jarsWithPossibleClassifiers = new HashMap<>();
                 // set codebase properties
                 for (Map.Entry<String, URL> codebase : codebases.entrySet()) {
-                    String name = codebase.getKey();
-                    URL url = codebase.getValue();
+                    final String name = codebase.getKey();
+                    final URL url = codebase.getValue();
 
                     // We attempt to use a versionless identifier for each codebase. This assumes a specific version
                     // format in the jar filename. While we cannot ensure all jars in all plugins use this format, nonconformity
                     // only means policy grants would need to include the entire jar filename as they always have before.
+                    final Matcher matcher = CODEBASE_JAR_WITH_CLASSIFIER.matcher(name);
+                    if (matcher.matches() && matcher.group(2) != null) {
+                        // There is a JAR that, possibly, has a classifier or SNAPSHOT at the end, examples are:
+                        //  - netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar
+                        //  - kafka-server-common-3.6.1-test.jar
+                        //  - lucene-core-9.11.0-snapshot-8a555eb.jar
+                        //  - zstd-jni-1.5.5-5.jar
+                        jarsWithPossibleClassifiers.put(codebase, matcher.group(2));
+                    } else {
+                        String property = "codebase." + name;
+                        String aliasProperty = "codebase." + name.replaceFirst("-\\d+\\.\\d+.*\\.jar", "");
+                        addCodebaseToSystemProperties(propertiesSet, url, property, aliasProperty);
+                    }
+                }
+
+                // set codebase properties for JARs that might present with classifiers
+                for (Map.Entry<Map.Entry<String, URL>, String> jarWithPossibleClassifier : jarsWithPossibleClassifiers.entrySet()) {
+                    final Map.Entry<String, URL> codebase = jarWithPossibleClassifier.getKey();
+                    final String name = codebase.getKey();
+                    final URL url = codebase.getValue();
 
                     String property = "codebase." + name;
                     String aliasProperty = "codebase." + name.replaceFirst("-\\d+\\.\\d+.*\\.jar", "");
-                    if (aliasProperty.equals(property) == false) {
-                        propertiesSet.add(aliasProperty);
-                        String previous = System.setProperty(aliasProperty, url.toString());
-                        if (previous != null) {
-                            throw new IllegalStateException(
-                                "codebase property already set: " + aliasProperty + " -> " + previous + ", cannot set to " + url.toString()
-                            );
-                        }
-                    }
-                    propertiesSet.add(property);
-                    String previous = System.setProperty(property, url.toString());
-                    if (previous != null) {
-                        throw new IllegalStateException(
-                            "codebase property already set: " + property + " -> " + previous + ", cannot set to " + url.toString()
-                        );
+                    if (System.getProperties().containsKey(aliasProperty)) {
+                        aliasProperty = aliasProperty + "@" + jarWithPossibleClassifier.getValue();
                     }
+
+                    addCodebaseToSystemProperties(propertiesSet, url, property, aliasProperty);
                 }
+
                 return Policy.getInstance("JavaPolicy", new URIParameter(policyFile.toURI()));
             } finally {
                 // clear codebase properties
@@ -270,6 +286,27 @@ static Policy readPolicy(URL policyFile, Map<String, URL> codebases) {
         }
     }
 
+    /** adds the codebase to properties and System properties */
+    @SuppressForbidden(reason = "accesses System properties to configure codebases")
+    private static void addCodebaseToSystemProperties(List<String> propertiesSet, final URL url, String property, String aliasProperty) {
+        if (aliasProperty.equals(property) == false) {
+            propertiesSet.add(aliasProperty);
+            String previous = System.setProperty(aliasProperty, url.toString());
+            if (previous != null) {
+                throw new IllegalStateException(
+                    "codebase property already set: " + aliasProperty + " -> " + previous + ", cannot set to " + url.toString()
+                );
+            }
+        }
+        propertiesSet.add(property);
+        String previous = System.setProperty(property, url.toString());
+        if (previous != null) {
+            throw new IllegalStateException(
+                "codebase property already set: " + property + " -> " + previous + ", cannot set to " + url.toString()
+            );
+        }
+    }
+
     /** returns dynamic Permissions to configured paths and bind ports */
     static Permissions createPermissions(Environment environment) throws IOException {
         Permissions policy = new Permissions();
diff --git a/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java b/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java
index 69e561bb8fd89..76353aea03257 100644
--- a/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java
+++ b/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java
@@ -35,8 +35,12 @@
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.io.IOException;
+import java.net.URL;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.Map;
 
 public class SecurityTests extends OpenSearchTestCase {
 
@@ -80,4 +84,23 @@ public void testProcessExecution() throws Exception {
             fail("didn't get expected exception");
         } catch (SecurityException expected) {}
     }
+
+    public void testReadPolicyWithCodebases() throws IOException {
+        final Map<String, URL> codebases = Map.of(
+            "test-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar",
+            new URL("file://test-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar"),
+            "test-kafka-server-common-3.6.1.jar",
+            new URL("file://test-kafka-server-common-3.6.1.jar"),
+            "test-kafka-server-common-3.6.1-test.jar",
+            new URL("file://test-kafka-server-common-3.6.1-test.jar"),
+            "test-lucene-core-9.11.0-snapshot-8a555eb.jar",
+            new URL("file://test-lucene-core-9.11.0-snapshot-8a555eb.jar"),
+            "test-zstd-jni-1.5.5-5.jar",
+            new URL("file://test-zstd-jni-1.5.5-5.jar")
+        );
+
+        AccessController.doPrivileged(
+            (PrivilegedAction<?>) () -> Security.readPolicy(SecurityTests.class.getResource("test-codebases.policy"), codebases)
+        );
+    }
 }
diff --git a/server/src/test/resources/org/opensearch/bootstrap/test-codebases.policy b/server/src/test/resources/org/opensearch/bootstrap/test-codebases.policy
new file mode 100644
index 0000000000000..0dc754ccb0c57
--- /dev/null
+++ b/server/src/test/resources/org/opensearch/bootstrap/test-codebases.policy
@@ -0,0 +1,25 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+//// additional test framework permissions.
+//// These are mock objects and test management that we allow test framework libs
+//// to provide on our behalf. But tests themselves cannot do this stuff!
+
+grant codeBase "${codebase.zstd-jni}" {
+};
+
+grant codeBase "${codebase.kafka-server-common}" {
+};
+
+grant codeBase "${codebase.kafka-server-common@test}" {
+};
diff --git a/server/src/test/resources/org/opensearch/bootstrap/test.policy b/server/src/test/resources/org/opensearch/bootstrap/test.policy
new file mode 100644
index 0000000000000..7b0a9b3d5d709
--- /dev/null
+++ b/server/src/test/resources/org/opensearch/bootstrap/test.policy
@@ -0,0 +1,13 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+grant {
+  // allow to test Security policy and codebases
+  permission java.util.PropertyPermission "*", "read,write";
+  permission java.security.SecurityPermission "createPolicy.JavaPolicy";
+};

From b3125a0e41740569c343bae2d6599629ad912e85 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Tue, 12 Mar 2024 13:07:57 -0400
Subject: [PATCH 04/11] Bump up jackson version to 2.16.2 (#12611) (#12614)

* Bump up jackson to 2.16.2

* Add changelog entry

---------

(cherry picked from commit 69fc7dd5bf53c22bac9d4938b408cc56f7011c39)
Signed-off-by: Aman Khare
Signed-off-by: Andriy Redko
Co-authored-by: Aman Khare <85096200+amkhar@users.noreply.github.com>
Co-authored-by: Aman Khare
---
 CHANGELOG.md                                                  | 1 +
 buildSrc/version.properties                                   | 4 ++--
 client/sniffer/licenses/jackson-core-2.16.1.jar.sha1          | 1 -
 client/sniffer/licenses/jackson-core-2.16.2.jar.sha1          | 1 +
 .../upgrade-cli/licenses/jackson-annotations-2.16.1.jar.sha1  | 1 -
 .../upgrade-cli/licenses/jackson-annotations-2.16.2.jar.sha1  | 1 +
 .../upgrade-cli/licenses/jackson-databind-2.16.1.jar.sha1     | 1 -
 .../upgrade-cli/licenses/jackson-databind-2.16.2.jar.sha1     | 1 +
 libs/core/licenses/jackson-core-2.16.1.jar.sha1               | 1 -
 libs/core/licenses/jackson-core-2.16.2.jar.sha1               | 1 +
 libs/x-content/licenses/jackson-core-2.16.1.jar.sha1          | 1 -
 libs/x-content/licenses/jackson-core-2.16.2.jar.sha1          | 1 +
 .../licenses/jackson-dataformat-cbor-2.16.1.jar.sha1          | 1 -
 .../licenses/jackson-dataformat-cbor-2.16.2.jar.sha1          | 1 +
 .../licenses/jackson-dataformat-smile-2.16.1.jar.sha1         | 1 -
 .../licenses/jackson-dataformat-smile-2.16.2.jar.sha1         | 1 +
 .../licenses/jackson-dataformat-yaml-2.16.1.jar.sha1          | 1 -
 .../licenses/jackson-dataformat-yaml-2.16.2.jar.sha1          | 1 +
 .../ingest-geoip/licenses/jackson-annotations-2.16.1.jar.sha1 | 1 -
 .../ingest-geoip/licenses/jackson-annotations-2.16.2.jar.sha1 | 1 +
 .../ingest-geoip/licenses/jackson-databind-2.16.1.jar.sha1    | 1 -
 .../ingest-geoip/licenses/jackson-databind-2.16.2.jar.sha1    | 1 +
 .../crypto-kms/licenses/jackson-annotations-2.16.1.jar.sha1   | 1 -
 .../crypto-kms/licenses/jackson-annotations-2.16.2.jar.sha1   | 1 +
 plugins/crypto-kms/licenses/jackson-databind-2.16.1.jar.sha1  | 1 -
 plugins/crypto-kms/licenses/jackson-databind-2.16.2.jar.sha1  | 1 +
 .../licenses/jackson-annotations-2.16.1.jar.sha1              | 1 -
 .../licenses/jackson-annotations-2.16.2.jar.sha1              | 1 +
 .../discovery-ec2/licenses/jackson-databind-2.16.1.jar.sha1   | 1 -
 .../discovery-ec2/licenses/jackson-databind-2.16.2.jar.sha1   | 1 +
 .../licenses/jackson-annotations-2.16.1.jar.sha1              | 1 -
 .../licenses/jackson-annotations-2.16.2.jar.sha1              | 1 +
 .../licenses/jackson-databind-2.16.1.jar.sha1                 | 1 -
 .../licenses/jackson-databind-2.16.2.jar.sha1                 | 1 +
 .../licenses/jackson-dataformat-xml-2.16.1.jar.sha1           | 1 -
 .../licenses/jackson-dataformat-xml-2.16.2.jar.sha1           | 1 +
 .../licenses/jackson-datatype-jsr310-2.16.1.jar.sha1          | 1 -
 .../licenses/jackson-datatype-jsr310-2.16.2.jar.sha1          | 1 +
 .../licenses/jackson-module-jaxb-annotations-2.16.1.jar.sha1  | 1 -
 .../licenses/jackson-module-jaxb-annotations-2.16.2.jar.sha1  | 1 +
 .../licenses/jackson-annotations-2.16.1.jar.sha1              | 1 -
 .../licenses/jackson-annotations-2.16.2.jar.sha1              | 1 +
 .../repository-s3/licenses/jackson-databind-2.16.1.jar.sha1   | 1 -
 .../repository-s3/licenses/jackson-databind-2.16.2.jar.sha1   | 1 +
 44 files changed, 24 insertions(+), 23 deletions(-)
 delete mode 100644 client/sniffer/licenses/jackson-core-2.16.1.jar.sha1
 create mode 100644 client/sniffer/licenses/jackson-core-2.16.2.jar.sha1
 delete mode 100644 distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.1.jar.sha1
 create mode 100644 distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.2.jar.sha1
 delete mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.1.jar.sha1
 create mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.2.jar.sha1
 delete mode 100644 libs/core/licenses/jackson-core-2.16.1.jar.sha1
 create mode 100644 libs/core/licenses/jackson-core-2.16.2.jar.sha1
 delete mode 100644 libs/x-content/licenses/jackson-core-2.16.1.jar.sha1
 create mode 100644 libs/x-content/licenses/jackson-core-2.16.2.jar.sha1
 delete mode 100644 libs/x-content/licenses/jackson-dataformat-cbor-2.16.1.jar.sha1
 create mode 100644 libs/x-content/licenses/jackson-dataformat-cbor-2.16.2.jar.sha1
 delete mode 100644 libs/x-content/licenses/jackson-dataformat-smile-2.16.1.jar.sha1
 create mode 100644 libs/x-content/licenses/jackson-dataformat-smile-2.16.2.jar.sha1
 delete mode 100644 libs/x-content/licenses/jackson-dataformat-yaml-2.16.1.jar.sha1
 create mode 100644 libs/x-content/licenses/jackson-dataformat-yaml-2.16.2.jar.sha1
 delete mode 100644 modules/ingest-geoip/licenses/jackson-annotations-2.16.1.jar.sha1
 create mode 100644 modules/ingest-geoip/licenses/jackson-annotations-2.16.2.jar.sha1
 delete mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.16.1.jar.sha1
 create mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.16.2.jar.sha1
 delete mode 100644 plugins/crypto-kms/licenses/jackson-annotations-2.16.1.jar.sha1
 create mode 100644 plugins/crypto-kms/licenses/jackson-annotations-2.16.2.jar.sha1
 delete mode 100644 plugins/crypto-kms/licenses/jackson-databind-2.16.1.jar.sha1
 create mode 100644 plugins/crypto-kms/licenses/jackson-databind-2.16.2.jar.sha1
 delete mode 100644 plugins/discovery-ec2/licenses/jackson-annotations-2.16.1.jar.sha1
 create mode 100644 plugins/discovery-ec2/licenses/jackson-annotations-2.16.2.jar.sha1
 delete mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.16.1.jar.sha1
 create mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.16.2.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/jackson-annotations-2.16.1.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/jackson-annotations-2.16.2.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/jackson-databind-2.16.1.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/jackson-databind-2.16.2.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.1.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.2.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.1.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.2.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.1.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.2.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/jackson-annotations-2.16.1.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/jackson-annotations-2.16.2.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/jackson-databind-2.16.1.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/jackson-databind-2.16.2.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 94e8638ef023c..17c9410d6d466 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -36,6 +36,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `codecov/codecov-action` from 3 to 4 ([#12585](https://github.com/opensearch-project/OpenSearch/pull/12585))
 - Bump `org.apache.zookeeper:zookeeper` from 3.9.1 to 3.9.2 ([#12580](https://github.com/opensearch-project/OpenSearch/pull/12580))
 - Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#12579](https://github.com/opensearch-project/OpenSearch/pull/12579))
+- Bump Jackson version from 2.16.1 to 2.16.2 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611))
 
 ### Changed
 - Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499))
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 271f2cc17713f..641eab71d6b2a 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -7,8 +7,8 @@ bundled_jdk = 21.0.2+13
 # optional dependencies
 spatial4j = 0.7
 jts = 1.15.0
-jackson = 2.16.1
-jackson_databind = 2.16.1
+jackson = 2.16.2
+jackson_databind = 2.16.2
 snakeyaml = 2.1
 icu4j = 70.1
 supercsv = 2.4.0
diff --git a/client/sniffer/licenses/jackson-core-2.16.1.jar.sha1 b/client/sniffer/licenses/jackson-core-2.16.1.jar.sha1
deleted file mode 100644
index 908d071b34a2a..0000000000000
--- a/client/sniffer/licenses/jackson-core-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9456bb3cdd0f79f91a5f730a1b1bb041a380c91f
\ No newline at end of file
diff --git a/client/sniffer/licenses/jackson-core-2.16.2.jar.sha1 b/client/sniffer/licenses/jackson-core-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..68646a1e66ffc
--- /dev/null
+++ b/client/sniffer/licenses/jackson-core-2.16.2.jar.sha1
@@ -0,0 +1 @@
+b4f588bf070f77b604c645a7d60b71eae2e6ea09
\ No newline at end of file
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.1.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.1.jar.sha1
deleted file mode 100644
index cbc65687606fc..0000000000000
--- a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fd441d574a71e7d10a4f73de6609f881d8cdfeec
\ No newline at end of file
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..d7dfc5ff83706
--- /dev/null
+++ b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.2.jar.sha1
@@ -0,0 +1 @@
+dfcd11c847ea7276aa073c25f5fe8ee361748d7f
\ No newline at end of file
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.1.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.1.jar.sha1
deleted file mode 100644
index d231db4fd49fc..0000000000000
--- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-02a16efeb840c45af1e2f31753dfe76795278b73
\ No newline at end of file
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..86998b4558461
--- /dev/null
+++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.2.jar.sha1
@@ -0,0 +1 @@
+7fda67535b54d74eebf6157682b835c847410932
\ No newline at end of file
diff --git a/libs/core/licenses/jackson-core-2.16.1.jar.sha1 b/libs/core/licenses/jackson-core-2.16.1.jar.sha1
deleted file mode 100644
index 908d071b34a2a..0000000000000
--- a/libs/core/licenses/jackson-core-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9456bb3cdd0f79f91a5f730a1b1bb041a380c91f
\ No newline at end of file
diff --git a/libs/core/licenses/jackson-core-2.16.2.jar.sha1 b/libs/core/licenses/jackson-core-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..68646a1e66ffc
--- /dev/null
+++ b/libs/core/licenses/jackson-core-2.16.2.jar.sha1
@@ -0,0 +1 @@
+b4f588bf070f77b604c645a7d60b71eae2e6ea09
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-core-2.16.1.jar.sha1 b/libs/x-content/licenses/jackson-core-2.16.1.jar.sha1
deleted file mode 100644
index 908d071b34a2a..0000000000000
--- a/libs/x-content/licenses/jackson-core-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9456bb3cdd0f79f91a5f730a1b1bb041a380c91f
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-core-2.16.2.jar.sha1 b/libs/x-content/licenses/jackson-core-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..68646a1e66ffc
--- /dev/null
+++ b/libs/x-content/licenses/jackson-core-2.16.2.jar.sha1
@@ -0,0 +1 @@
+b4f588bf070f77b604c645a7d60b71eae2e6ea09
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.16.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.16.1.jar.sha1
deleted file mode 100644
index b4b781f604910..0000000000000
--- a/libs/x-content/licenses/jackson-dataformat-cbor-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1be7098dccc079171464dca7e386bd8df623b031
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.16.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..828f73962c333
--- /dev/null
+++ b/libs/x-content/licenses/jackson-dataformat-cbor-2.16.2.jar.sha1
@@ -0,0 +1 @@
+1a1a3036016ea2ae3061c0bb46cba6968ff7faae
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.16.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.16.1.jar.sha1
deleted file mode 100644
index ad91e748ebe94..0000000000000
--- a/libs/x-content/licenses/jackson-dataformat-smile-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c4ddbc5277670f2e56b1f5e44e83afa748bcb125
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.16.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..e50314e0dd746
--- /dev/null
+++ b/libs/x-content/licenses/jackson-dataformat-smile-2.16.2.jar.sha1
@@ -0,0 +1 @@
+209fd9ae0e6c6b233b0c14baa8f17acea71e5766
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.16.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.16.1.jar.sha1
deleted file mode 100644
index 9b30e7bf921b2..0000000000000
--- a/libs/x-content/licenses/jackson-dataformat-yaml-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8e4f1923d73cd55f2b4c0d56ee4ed80419297354
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.16.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..feb51c61bfc7c
--- /dev/null
+++ b/libs/x-content/licenses/jackson-dataformat-yaml-2.16.2.jar.sha1
@@ -0,0 +1 @@
+13088f6762211f264bc0ebf5467be96d8e9e3ebf
\ No newline at end of file
diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.16.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.16.1.jar.sha1
deleted file mode 100644
index cbc65687606fc..0000000000000
--- a/modules/ingest-geoip/licenses/jackson-annotations-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fd441d574a71e7d10a4f73de6609f881d8cdfeec
\ No newline at end of file
diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.16.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..d7dfc5ff83706
--- /dev/null
+++ b/modules/ingest-geoip/licenses/jackson-annotations-2.16.2.jar.sha1
@@ -0,0 +1 @@
+dfcd11c847ea7276aa073c25f5fe8ee361748d7f
\ No newline at end of file
diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.16.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.16.1.jar.sha1
deleted file mode 100644
index d231db4fd49fc..0000000000000
--- a/modules/ingest-geoip/licenses/jackson-databind-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-02a16efeb840c45af1e2f31753dfe76795278b73
\ No newline at end of file
diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.16.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..86998b4558461
--- /dev/null
+++ b/modules/ingest-geoip/licenses/jackson-databind-2.16.2.jar.sha1
@@ -0,0 +1 @@
+7fda67535b54d74eebf6157682b835c847410932
\ No newline at end of file
diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.16.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.16.1.jar.sha1
deleted file mode 100644
index cbc65687606fc..0000000000000
--- a/plugins/crypto-kms/licenses/jackson-annotations-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fd441d574a71e7d10a4f73de6609f881d8cdfeec
\ No newline at end of file
diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.16.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..d7dfc5ff83706
--- /dev/null
+++ b/plugins/crypto-kms/licenses/jackson-annotations-2.16.2.jar.sha1
@@ -0,0 +1 @@
+dfcd11c847ea7276aa073c25f5fe8ee361748d7f
\ No newline at end of file
diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.16.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.16.1.jar.sha1
deleted file mode 100644
index d231db4fd49fc..0000000000000
--- a/plugins/crypto-kms/licenses/jackson-databind-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-02a16efeb840c45af1e2f31753dfe76795278b73
\ No newline at end of file
diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.16.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..86998b4558461
--- /dev/null
+++ b/plugins/crypto-kms/licenses/jackson-databind-2.16.2.jar.sha1
@@ -0,0 +1 @@
+7fda67535b54d74eebf6157682b835c847410932
\ No newline at end of file
diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.16.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.16.1.jar.sha1
deleted file mode 100644
index cbc65687606fc..0000000000000
--- a/plugins/discovery-ec2/licenses/jackson-annotations-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fd441d574a71e7d10a4f73de6609f881d8cdfeec
\ No newline at end of file
diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.16.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..d7dfc5ff83706
--- /dev/null
+++ b/plugins/discovery-ec2/licenses/jackson-annotations-2.16.2.jar.sha1
@@ -0,0 +1 @@
+dfcd11c847ea7276aa073c25f5fe8ee361748d7f
\ No newline at end of file
diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.16.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.16.1.jar.sha1
deleted file mode 100644
index d231db4fd49fc..0000000000000
--- a/plugins/discovery-ec2/licenses/jackson-databind-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-02a16efeb840c45af1e2f31753dfe76795278b73
\ No newline at end of file
diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.16.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..86998b4558461
--- /dev/null
+++ b/plugins/discovery-ec2/licenses/jackson-databind-2.16.2.jar.sha1
@@ -0,0 +1 @@
+7fda67535b54d74eebf6157682b835c847410932
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.16.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.16.1.jar.sha1
deleted file mode 100644
index cbc65687606fc..0000000000000
--- a/plugins/repository-azure/licenses/jackson-annotations-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fd441d574a71e7d10a4f73de6609f881d8cdfeec
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.16.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..d7dfc5ff83706
--- /dev/null
+++ b/plugins/repository-azure/licenses/jackson-annotations-2.16.2.jar.sha1
@@ -0,0 +1 @@
+dfcd11c847ea7276aa073c25f5fe8ee361748d7f
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-databind-2.16.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.16.1.jar.sha1
deleted file mode 100644
index d231db4fd49fc..0000000000000
--- a/plugins/repository-azure/licenses/jackson-databind-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-02a16efeb840c45af1e2f31753dfe76795278b73
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-databind-2.16.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..86998b4558461
--- /dev/null
+++ b/plugins/repository-azure/licenses/jackson-databind-2.16.2.jar.sha1
@@ -0,0 +1 @@
+7fda67535b54d74eebf6157682b835c847410932
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.1.jar.sha1
deleted file mode 100644
index ad4e055d4f19a..0000000000000
--- a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d952ad30d3f2d1220f39db175618414b56d14638
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..4f353d6166121
--- /dev/null
+++ b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.2.jar.sha1
@@ -0,0 +1 @@
+796c3141d3bbcf67dc06751695dca116b2838a73
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.1.jar.sha1
deleted file mode 100644
index 4309dad93b2b6..0000000000000
--- a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-36a418325c618e440e5ccb80b75c705d894f50bd
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..94266f5d40e62
--- /dev/null
+++ b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.2.jar.sha1
@@ -0,0 +1 @@
+58e86108e4b1b1e893e7a69b1bbca880acfca143
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.1.jar.sha1
deleted file mode 100644
index 5f54d0ac554e0..0000000000000
--- a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e9df364a2695e66eb8d2803d6725424842760125
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..7fa3014c053a1
--- /dev/null
+++ b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.2.jar.sha1
@@ -0,0 +1 @@
+d1274db656edefe242fbd26d3266f7b4abb6f57b
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.16.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.16.1.jar.sha1
deleted file mode 100644
index cbc65687606fc..0000000000000
--- a/plugins/repository-s3/licenses/jackson-annotations-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fd441d574a71e7d10a4f73de6609f881d8cdfeec
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.16.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..d7dfc5ff83706
--- /dev/null
+++ b/plugins/repository-s3/licenses/jackson-annotations-2.16.2.jar.sha1
@@ -0,0 +1 @@
+dfcd11c847ea7276aa073c25f5fe8ee361748d7f
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/jackson-databind-2.16.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.16.1.jar.sha1
deleted file mode 100644
index d231db4fd49fc..0000000000000
--- a/plugins/repository-s3/licenses/jackson-databind-2.16.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-02a16efeb840c45af1e2f31753dfe76795278b73
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/jackson-databind-2.16.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.16.2.jar.sha1
new file mode 100644
index 0000000000000..86998b4558461
--- /dev/null
+++ b/plugins/repository-s3/licenses/jackson-databind-2.16.2.jar.sha1
@@ -0,0 +1 @@
+7fda67535b54d74eebf6157682b835c847410932
\ No newline at end of file

From 4bfc93dbcbd4ba1e4d36d3d3b33e5d1dd3b3cfbe Mon Sep 17 00:00:00 2001
From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com>
Date: Tue, 12 Mar 2024 10:11:29 -0700
Subject: [PATCH 05/11] Fix PluginInfo bwc for opensearch_version field
 (#12543) (#12605)

* Fix PluginInfo bwc for opensearch_version field

* Update how empty range list is handled

---------

(cherry picked from commit a702f6a6e756d5c56e6ff04784bca4e70b40f9a5)
Signed-off-by: Abhilasha Seth
Signed-off-by: github-actions[bot]
Co-authored-by: github-actions[bot]
---
 .../org/opensearch/plugins/PluginInfo.java    |  4 +--
 .../opensearch/plugins/PluginInfoTests.java   | 28 +++++++++++++++++++
 2 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/server/src/main/java/org/opensearch/plugins/PluginInfo.java b/server/src/main/java/org/opensearch/plugins/PluginInfo.java
index 12f6720b954ba..7cf24d7902095 100644
--- a/server/src/main/java/org/opensearch/plugins/PluginInfo.java
+++ b/server/src/main/java/org/opensearch/plugins/PluginInfo.java
@@ -450,7 +450,7 @@ public List<SemverRange> getOpenSearchVersionRanges() {
      */
     public String getOpenSearchVersionRangesString() {
         if (opensearchVersionRanges == null || opensearchVersionRanges.isEmpty()) {
-            return "";
+            throw new IllegalStateException("Opensearch version ranges list cannot be empty");
         }
         if (opensearchVersionRanges.size() == 1) {
             return opensearchVersionRanges.get(0).toString();
@@ -491,7 +491,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         {
             builder.field("name", name);
             builder.field("version", version);
-            builder.field("opensearch_version", opensearchVersionRanges);
+            builder.field("opensearch_version", getOpenSearchVersionRangesString());
             builder.field("java_version", javaVersion);
             builder.field("description", description);
             builder.field("classname", classname);
diff --git a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java
index 7f55c9f5cc7f7..12c7dc870c104 100644
--- a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java
+++ b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java
@@ -37,7 +37,10 @@
 import org.opensearch.Version;
 import org.opensearch.action.admin.cluster.node.info.PluginsAndModules;
 import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.core.common.io.stream.ByteBufferStreamInput;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.semver.SemverRange;
 import org.opensearch.test.OpenSearchTestCase;
 
@@ -367,6 +370,31 @@ public void testSerialize() throws Exception {
         assertThat(info2.toString(), equalTo(info.toString()));
     }
 
+    public void testToXContent() throws Exception {
+        PluginInfo info = new PluginInfo(
+            "fake",
+            "foo",
+            "dummy",
+            Version.CURRENT,
+            "1.8",
+            "dummyClass",
+            "folder",
+            Collections.emptyList(),
+            false
+        );
+        XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint();
+        String prettyPrint = info.toXContent(builder, ToXContent.EMPTY_PARAMS).prettyPrint().toString();
+        assertTrue(prettyPrint.contains("\"name\" : \"fake\""));
+        assertTrue(prettyPrint.contains("\"version\" : \"dummy\""));
+        assertTrue(prettyPrint.contains("\"opensearch_version\" : \"" + Version.CURRENT));
+        assertTrue(prettyPrint.contains("\"java_version\" : \"1.8\""));
+        assertTrue(prettyPrint.contains("\"description\" : \"foo\""));
+        assertTrue(prettyPrint.contains("\"classname\" : \"dummyClass\""));
+        assertTrue(prettyPrint.contains("\"custom_foldername\" : \"folder\""));
+        assertTrue(prettyPrint.contains("\"extended_plugins\" : [ ]"));
+        assertTrue(prettyPrint.contains("\"has_native_controller\" : false"));
+    }
+
     public void testPluginListSorted() {
         List<PluginInfo> plugins = new ArrayList<>();
         plugins.add(new PluginInfo("c", "foo", "dummy", Version.CURRENT, "1.8", "dummyclass", Collections.emptyList(), randomBoolean()));

From 1235756d5f3a25b4f04634a4a4c278356edcd07c Mon Sep 17 00:00:00 2001
From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com>
Date: Tue, 12 Mar 2024 15:32:19 -0400
Subject: [PATCH 06/11] Bump opentelemetry from 1.35.0 to 1.36.0 (#12618)
 (#12621)

(cherry picked from commit 96dc8207c425c2d0a6c03daccc3cf1c0c8aae0a7)
Signed-off-by: Andriy Redko
Signed-off-by: github-actions[bot]
Co-authored-by: github-actions[bot]
---
 CHANGELOG.md                                                      | 2 +-
 buildSrc/version.properties                                       | 2 +-
 .../telemetry-otel/licenses/opentelemetry-api-1.35.0.jar.sha1     | 1 -
 .../telemetry-otel/licenses/opentelemetry-api-1.36.0.jar.sha1     | 1 +
 .../licenses/opentelemetry-context-1.35.0.jar.sha1                | 1 -
 .../licenses/opentelemetry-context-1.36.0.jar.sha1                | 1 +
 .../licenses/opentelemetry-exporter-common-1.35.0.jar.sha1        | 1 -
 .../licenses/opentelemetry-exporter-common-1.36.0.jar.sha1        | 1 +
 .../licenses/opentelemetry-exporter-logging-1.35.0.jar.sha1       | 1 -
 .../licenses/opentelemetry-exporter-logging-1.36.0.jar.sha1       | 1 +
 .../licenses/opentelemetry-exporter-otlp-1.35.0.jar.sha1          | 1 -
 .../licenses/opentelemetry-exporter-otlp-1.36.0.jar.sha1          | 1 +
 .../licenses/opentelemetry-exporter-otlp-common-1.35.0.jar.sha1   | 1 -
 .../licenses/opentelemetry-exporter-otlp-common-1.36.0.jar.sha1   | 1 +
 .../opentelemetry-exporter-sender-okhttp-1.35.0.jar.sha1          | 1 -
 .../opentelemetry-exporter-sender-okhttp-1.36.0.jar.sha1          | 1 +
 .../opentelemetry-extension-incubator-1.35.0-alpha.jar.sha1       | 1 -
 .../opentelemetry-extension-incubator-1.36.0-alpha.jar.sha1       | 1 +
 .../telemetry-otel/licenses/opentelemetry-sdk-1.35.0.jar.sha1     | 1 -
 .../telemetry-otel/licenses/opentelemetry-sdk-1.36.0.jar.sha1     | 1 +
 .../licenses/opentelemetry-sdk-common-1.35.0.jar.sha1             | 1 -
 .../licenses/opentelemetry-sdk-common-1.36.0.jar.sha1             | 1 +
 .../licenses/opentelemetry-sdk-logs-1.35.0.jar.sha1               | 1 -
 .../licenses/opentelemetry-sdk-logs-1.36.0.jar.sha1               | 1 +
 .../licenses/opentelemetry-sdk-metrics-1.35.0.jar.sha1            | 1 -
 .../licenses/opentelemetry-sdk-metrics-1.36.0.jar.sha1            | 1 +
 .../licenses/opentelemetry-sdk-trace-1.35.0.jar.sha1              | 1 -
 .../licenses/opentelemetry-sdk-trace-1.36.0.jar.sha1              | 1 +
 28 files changed, 15 insertions(+), 15 deletions(-)
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.35.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.36.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.35.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.36.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.35.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.36.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.35.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.36.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.35.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.36.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.35.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.36.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.35.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.36.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.35.0-alpha.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.36.0-alpha.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.35.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.36.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.35.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.36.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.35.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.36.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.35.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.36.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.35.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.36.0.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 17c9410d6d466..fc7d6b7537261 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,7 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `org.bouncycastle:bcpkix-jdk15to18` to `org.bouncycastle:bcpkix-jdk18on` version 1.77 ([#12326](https://github.com/opensearch-project/OpenSearch/pull/12326))
 - Bump `gradle/wrapper-validation-action` from 1 to 2 ([#12367](https://github.com/opensearch-project/OpenSearch/pull/12367))
 - Bump `netty` from 4.1.106.Final to 4.1.107.Final ([#12372](https://github.com/opensearch-project/OpenSearch/pull/12372))
-- Bump `opentelemetry` from 1.34.1 to 1.35.0 ([#12388](https://github.com/opensearch-project/OpenSearch/pull/12388))
+- Bump `opentelemetry` from 1.34.1 to 1.36.0 ([#12388](https://github.com/opensearch-project/OpenSearch/pull/12388), [#12618](https://github.com/opensearch-project/OpenSearch/pull/12618))
 - Bump Apache Lucene from 9.9.2 to 9.10.0 ([#12392](https://github.com/opensearch-project/OpenSearch/pull/12392))
 - Bump `org.apache.logging.log4j:log4j-core` from 2.22.1 to 2.23.1 ([#12464](https://github.com/opensearch-project/OpenSearch/pull/12464), [#12587](https://github.com/opensearch-project/OpenSearch/pull/12587))
 - Bump `antlr4` from 4.11.1 to 4.13.1 ([#12445](https://github.com/opensearch-project/OpenSearch/pull/12445))
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 641eab71d6b2a..e7e995fbbb310 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -68,5 +68,5 @@ jzlib = 1.1.3
 resteasy = 6.2.4.Final
 
 # opentelemetry dependencies
-opentelemetry = 1.35.0
+opentelemetry = 1.36.0
 opentelemetrysemconv = 1.23.1-alpha
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.35.0.jar.sha1
deleted file mode 100644
index e81b44b9e057f..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.35.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-388c49986bc20f3b4bea58470eb16decd230c2db
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.36.0.jar.sha1
new file mode 100644
index 0000000000000..b577500d71e1d
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.36.0.jar.sha1
@@ -0,0 +1 @@
+59470f4aa3a9207f21936461b8fdcb36d46455ab
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.35.0.jar.sha1
deleted file mode 100644
index 0054417ef7b30..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.35.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6e2aa0e28c5069121cf11b2c93225942358f1423
\ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..d3156577248d5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.36.0.jar.sha1 @@ -0,0 +1 @@ +8850bc4c65d0fd22ff987b4683206ec4e69f2689 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.35.0.jar.sha1 deleted file mode 100644 index 27f96d15d6a70..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d0784bf59b74a2dc369551cc6d200e243ce8cca0 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..f176b21d12dc4 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.36.0.jar.sha1 @@ -0,0 +1 @@ +8d1cb823ab18fa871a1549e7c522bf28f2b3d8fe \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.35.0.jar.sha1 deleted file mode 100644 index dc21252f19d11..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5f038bc2b9a1f415c8f74c4a35e0d92fae64c430 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..cd25e0ab9f294 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.36.0.jar.sha1 @@ -0,0 +1 @@ +bc045cae89ff6f18071760f6e4659dd880e88a1b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.35.0.jar.sha1 deleted file mode 100644 index 088f2475af00f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a7baebfbc6c569163bc74a5add9819cc411d582 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..fabb394f9c2e0 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.36.0.jar.sha1 @@ -0,0 +1 @@ +5ee49902ba884d6c3e48499a9311a624396d9630 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.35.0.jar.sha1 deleted file mode 100644 index 438e431f1a7d4..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5506cb34a43fb733564a2aee47763d34cada9a7a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..378ba4d43dcd1 --- /dev/null 
+++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.36.0.jar.sha1 @@ -0,0 +1 @@ +2706e3b883d2bcd1a6b3e0bb4118ffbd7820550b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.35.0.jar.sha1 deleted file mode 100644 index 7fb0f09d60c0f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d5abbce20bf88dff97b9ec7104bf13d163042f30 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..a3d7e15e1a624 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.36.0.jar.sha1 @@ -0,0 +1 @@ +dcc924787b559278697b74dbc5bb6d046b236ef6 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.35.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.35.0-alpha.jar.sha1 deleted file mode 100644 index 8ec097d471e16..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.35.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -72720d7880110d02aad6d69066cc0311c568f17d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.36.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.36.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..71ab3e184db9e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.36.0-alpha.jar.sha1 @@ -0,0 +1 @@ +d58f7c669e371f6ff61b705770af9a3c1f31df52 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.35.0.jar.sha1 deleted file mode 100644 index 7c9200f50e438..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -82fe6a227fb3148aae2e61978cf77f7005a66bca \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..c9a75d1b4350a --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.36.0.jar.sha1 @@ -0,0 +1 @@ +4056d1b562b4da7720817d8af15d1d3ccdf4b776 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.35.0.jar.sha1 deleted file mode 100644 index 9fd80da7597c2..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5d2bc29d8f2ef2cf5a2239ac6990a2c89118456d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..c31584f59c0d8 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.36.0.jar.sha1 @@ -0,0 +1 @@ +11d6f8c7b029efcb5c6c449cadef155b781afb78 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.35.0.jar.sha1 
b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.35.0.jar.sha1 deleted file mode 100644 index d4dc7528c83d6..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da2122051bd95d3a36bf34f72f1b0dd9b105fd1f \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..a134bb06ec635 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.36.0.jar.sha1 @@ -0,0 +1 @@ +98e94479db1e68c4779efc44bf6b4fca83e98b54 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.35.0.jar.sha1 deleted file mode 100644 index 7e1b206d42ba4..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -97942849d51081e766a29646175b752bb79d7ce0 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..d146241f52f29 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.36.0.jar.sha1 @@ -0,0 +1 @@ +4f8f5d30c3eeede7b2260d979d9f403cfa381c3d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.35.0.jar.sha1 deleted file mode 100644 index 47125e70aa884..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -861b6a3c43a15ca3782f1fa17b024b9afa4b3ea6 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..802761e38846c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.36.0.jar.sha1 @@ -0,0 +1 @@ +e3068cbaedfac6a28c6483923982b2efb861d3f4 \ No newline at end of file From 5bb007806ee18abce6322501acd95b210665e453 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 13 Mar 2024 11:35:46 -0700 Subject: [PATCH 07/11] Remove duplicated assert in test (#12616) (#12635) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This line can be safely removed because the two lines following this assert test exactly the same condition. 
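Concretely, the adjacent lines in IndexRecoveryIT (as shown in the hunk below) read:

    assertThat(response.shardRecoveryStates().size(), equalTo(SHARD_COUNT));
    assertThat(response.shardRecoveryStates().get(INDEX_NAME).size(), equalTo(1)); // removed here
    List recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
    assertThat(recoveryStates.size(), equalTo(1)); // verifies the same size

so dropping the second assertion loses no coverage.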
(cherry picked from commit ef9314e2d76aa7a26c6f1f28a4236c724ce4521e) Signed-off-by: Lukáš Vlček Signed-off-by: github-actions[bot] Co-authored-by: github-actions[bot] --- .../java/org/opensearch/indices/recovery/IndexRecoveryIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java index 72e680e22ed75..8ce87f37d77cd 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -298,7 +298,6 @@ public void testGatewayRecovery() throws Exception { logger.info("--> request recoveries"); RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); assertThat(response.shardRecoveryStates().size(), equalTo(SHARD_COUNT)); - assertThat(response.shardRecoveryStates().get(INDEX_NAME).size(), equalTo(1)); List recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); assertThat(recoveryStates.size(), equalTo(1)); From 0dfc846be86ad10c6c971e62444f30c669f2a9c9 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Thu, 14 Mar 2024 04:50:16 +0800 Subject: [PATCH 08/11] [Backport 2.x] Force merge API supports performing only on primary shards (#11269) (#12609) * Force merge API supports performing only on primary shards (#11269) * Force merge API supports performing on primary shards only Signed-off-by: Gao Binlong * Modify change log Signed-off-by: Gao Binlong * Fix test failure Signed-off-by: Gao Binlong * Fix typo Signed-off-by: Gao Binlong * Modify skip version Signed-off-by: Gao Binlong * Add version check and more tests Signed-off-by: Gao Binlong * Format code Signed-off-by: Gao Binlong * Modify supported version and add more test Signed-off-by: Gao Binlong * Change the supported version to 3.0.0 Signed-off-by: Gao Binlong * Add test case in SegmentReplicationIT Signed-off-by: Gao Binlong * Optimize the test code Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong (cherry picked from commit e6eec36ccbdea99e7832d1b6a9c014d2cee1940a) * Modify supported version to 2.13.0 Signed-off-by: Gao Binlong * Fix test failure Signed-off-by: Gao Binlong * Fix yml test failure Signed-off-by: Gao Binlong * Fix yaml test failure Signed-off-by: Gao Binlong * Fix typo and test failure Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- CHANGELOG.md | 1 + .../rest-api-spec/api/indices.forcemerge.json | 4 + .../test/indices.forcemerge/10_basic.yml | 20 +++ .../20_wait_for_completion.yml | 32 ++++- .../indices/forcemerge/ForceMergeIT.java | 18 +++ .../replication/SegmentReplicationIT.java | 21 ++- .../indices/forcemerge/ForceMergeRequest.java | 27 ++++ .../forcemerge/ForceMergeRequestBuilder.java | 8 ++ .../forcemerge/TransportForceMergeAction.java | 9 +- .../cluster/routing/RoutingTable.java | 10 ++ .../admin/indices/RestForceMergeAction.java | 1 + .../forcemerge/ForceMergeRequestTests.java | 121 +++++++++++++++++- .../cluster/routing/RoutingTableTests.java | 25 ++++ 13 files changed, 287 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fc7d6b7537261..48f2dae928704 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix error in RemoteSegmentStoreDirectory when debug logging is enabled 
([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328)) - Support for returning scores in matched queries ([#11626](https://github.com/opensearch-project/OpenSearch/pull/11626)) - Add shard id property to SearchLookup for use in field types provided by plugins ([#1063](https://github.com/opensearch-project/OpenSearch/pull/1063)) +- Force merge API supports performing on primary shards only ([#11269](https://github.com/opensearch-project/OpenSearch/pull/11269)) - Add kuromoji_completion analyzer and filter ([#4835](https://github.com/opensearch-project/OpenSearch/issues/4835)) - [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) - The org.opensearch.bootstrap.Security should support codebase for JAR files with classifiers ([#12586](https://github.com/opensearch-project/OpenSearch/issues/12586)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json index 02fbcc36dfe64..986bce55f41e5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json @@ -63,6 +63,10 @@ "wait_for_completion": { "type" : "boolean", "description" : "If false, the request will return a task immediately and the operation will run in background. Defaults to true." + }, + "primary_only": { + "type" : "boolean", + "description" : "Specify whether the operation should be performed only on primary shards. Defaults to false." + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml index d62c4c8882b13..7410e020e1a91 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml @@ -27,3 +27,23 @@ index: test max_num_segments: 10 only_expunge_deletes: true + +--- +"Test primary_only parameter": + - skip: + version: " - 2.12.99" + reason: "primary_only is available in 2.13.0+" + + - do: + indices.create: + index: test + body: + settings: + index.number_of_shards: 2 + index.number_of_replicas: 1 + + - do: + indices.forcemerge: + index: test + primary_only: true + - match: { _shards.total: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml index 1d6c63558bb59..dd95a46f7dee3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml @@ -4,15 +4,17 @@ # will return a task immediately and the merge process will run in background.
- skip: - version: " - 2.6.99" - reason: "only available in 2.7+" - features: allowed_warnings + version: " - 2.6.99, 2.13.0 - " + reason: "wait_for_completion was introduced in 2.7.0 and task description was changed in 2.13.0" + features: allowed_warnings, node_selector - do: indices.create: index: test_index - do: + node_selector: + version: " 2.7.0 - 2.12.99" indices.forcemerge: index: test_index wait_for_completion: false @@ -27,6 +29,30 @@ - match: { task.action: "indices:admin/forcemerge" } - match: { task.description: "Force-merge indices [test_index], maxSegments[1], onlyExpungeDeletes[false], flush[true]" } +--- +"Force merge index with wait_for_completion after task description changed": + - skip: + version: " - 2.12.99 " + reason: "task description was changed in 2.13.0" + features: allowed_warnings, node_selector + + - do: + node_selector: + version: " 2.13.0 - " + indices.forcemerge: + index: test_index + wait_for_completion: false + max_num_segments: 1 + - match: { task: /^.+$/ } + - set: { task: taskId } + + - do: + tasks.get: + wait_for_completion: true + task_id: $taskId + - match: { task.action: "indices:admin/forcemerge" } + - match: { task.description: "Force-merge indices [test_index], maxSegments[1], onlyExpungeDeletes[false], flush[true], primaryOnly[false]" } + # .tasks index is created when the force-merge operation completes, so we should delete .tasks index finally, # if not, the .tasks index may introduce unexpected warnings and then cause other test cases to fail. # Delete the .tasks index directly will also introduce warning, but currently we don't have such APIs which can delete one diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java index 09af533292e9a..5090af1706d5a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java @@ -100,6 +100,24 @@ public void testForceMergeUUIDConsistent() throws IOException { assertThat(primaryForceMergeUUID, is(replicaForceMergeUUID)); } + public void testForceMergeOnlyOnPrimaryShards() throws IOException { + internalCluster().ensureAtLeastNumDataNodes(2); + final String index = "test-index"; + createIndex( + index, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build() + ); + ensureGreen(index); + final ForceMergeResponse forceMergeResponse = client().admin() + .indices() + .prepareForceMerge(index) + .setMaxNumSegments(1) + .setPrimaryOnly(true) + .get(); + assertThat(forceMergeResponse.getFailedShards(), is(0)); + assertThat(forceMergeResponse.getSuccessfulShards(), is(1)); + } + private static String getForceMergeUUID(IndexShard indexShard) throws IOException { try (GatedCloseable wrappedIndexCommit = indexShard.acquireLastIndexCommit(true)) { return wrappedIndexCommit.get().getUserData().get(Engine.FORCE_MERGE_UUID_KEY); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index d4de69fe5f325..c6ecd7d62c7d1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -25,6 +25,7 @@ import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.flush.FlushRequest; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.action.get.GetResponse; @@ -400,6 +401,14 @@ public void testMultipleShards() throws Exception { } public void testReplicationAfterForceMerge() throws Exception { + performReplicationAfterForceMerge(false, SHARD_COUNT * (1 + REPLICA_COUNT)); + } + + public void testReplicationAfterForceMergeOnPrimaryShardsOnly() throws Exception { + performReplicationAfterForceMerge(true, SHARD_COUNT); + } + + private void performReplicationAfterForceMerge(boolean primaryOnly, int expectedSuccessfulShards) throws Exception { final String nodeA = internalCluster().startDataOnlyNode(); final String nodeB = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); @@ -430,8 +439,16 @@ public void testReplicationAfterForceMerge() throws Exception { waitForDocs(expectedHitCount, indexer); waitForSearchableDocs(expectedHitCount, nodeA, nodeB); - // Force a merge here so that the in memory SegmentInfos does not reference old segments on disk. - client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(false).get(); + // Perform force merge only on the primary shards. + final ForceMergeResponse forceMergeResponse = client().admin() + .indices() + .prepareForceMerge(INDEX_NAME) + .setPrimaryOnly(primaryOnly) + .setMaxNumSegments(1) + .setFlush(false) + .get(); + assertThat(forceMergeResponse.getFailedShards(), is(0)); + assertThat(forceMergeResponse.getSuccessfulShards(), is(expectedSuccessfulShards)); refresh(INDEX_NAME); verifyStoreContent(); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java index be96b54754fee..521cafbe4d765 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -70,11 +70,13 @@ public static final class Defaults { public static final int MAX_NUM_SEGMENTS = -1; public static final boolean ONLY_EXPUNGE_DELETES = false; public static final boolean FLUSH = true; + public static final boolean PRIMARY_ONLY = false; } private int maxNumSegments = Defaults.MAX_NUM_SEGMENTS; private boolean onlyExpungeDeletes = Defaults.ONLY_EXPUNGE_DELETES; private boolean flush = Defaults.FLUSH; + private boolean primaryOnly = Defaults.PRIMARY_ONLY; private static final Version FORCE_MERGE_UUID_VERSION = LegacyESVersion.V_7_7_0; @@ -102,6 +104,9 @@ public ForceMergeRequest(StreamInput in) throws IOException { maxNumSegments = in.readInt(); onlyExpungeDeletes = in.readBoolean(); flush = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_2_13_0)) { + primaryOnly = in.readBoolean(); + } if (in.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) { forceMergeUUID = in.readOptionalString(); } else { @@ -167,6 +172,21 @@ public ForceMergeRequest flush(boolean flush) { return this; } + /** + * Whether the force merge should be performed only on primary shards.
Defaults to {@code false}. + */ + public boolean primaryOnly() { + return primaryOnly; + } + + /** + * Whether the force merge should be performed only on primary shards. Defaults to {@code false}. + */ + public ForceMergeRequest primaryOnly(boolean primaryOnly) { + this.primaryOnly = primaryOnly; + return this; + } + /** * Should this task store its result after it has finished? */ @@ -189,6 +209,8 @@ public String getDescription() { + onlyExpungeDeletes + "], flush[" + flush + + "], primaryOnly[" + + primaryOnly + "]"; } @@ -198,6 +220,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(maxNumSegments); out.writeBoolean(onlyExpungeDeletes); out.writeBoolean(flush); + if (out.getVersion().onOrAfter(Version.V_2_13_0)) { + out.writeBoolean(primaryOnly); + } if (out.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) { out.writeOptionalString(forceMergeUUID); } @@ -212,6 +237,8 @@ public String toString() { + onlyExpungeDeletes + ", flush=" + flush + + ", primaryOnly=" + + primaryOnly + '}'; } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java index d8a618a1828ad..10b9749f16b27 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java @@ -81,4 +81,12 @@ public ForceMergeRequestBuilder setFlush(boolean flush) { request.flush(flush); return this; } + + /** + * Whether the force merge should be performed only on primary shards. Defaults to {@code false}. + */ + public ForceMergeRequestBuilder setPrimaryOnly(boolean primaryOnly) { + request.primaryOnly(primaryOnly); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java index fb8eb86c12269..b71c75462900a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java @@ -115,11 +115,16 @@ protected EmptyResult shardOperation(ForceMergeRequest request, ShardRouting sha } /** - * The refresh request works against *all* shards. + * The force merge request works against *all* shards by default, but it can work against all primary shards only + * by setting primary_only to true.
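+ *
+ * A minimal caller-side sketch using the builder methods added in this patch
+ * ("my-index" is a placeholder index name; client() as in the tests above):
+ *
+ *   ForceMergeResponse rsp = client().admin()
+ *       .indices()
+ *       .prepareForceMerge("my-index")
+ *       .setPrimaryOnly(true)   // restrict the merge to primary shard copies
+ *       .setMaxNumSegments(1)
+ *       .get();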
*/ @Override protected ShardsIterator shards(ClusterState clusterState, ForceMergeRequest request, String[] concreteIndices) { - return clusterState.routingTable().allShards(concreteIndices); + if (request.primaryOnly()) { + return clusterState.routingTable().allShardsSatisfyingPredicate(concreteIndices, ShardRouting::primary); + } else { + return clusterState.routingTable().allShards(concreteIndices); + } } @Override diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java index 7f2382f8b4910..e4095a84be081 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java @@ -307,6 +307,16 @@ public ShardsIterator allShardsSatisfyingPredicate(Predicate predi return allShardsSatisfyingPredicate(indices, predicate, false); } + /** + * All the shards for the provided indices on the node which match the predicate + * @param indices indices to return all the shards. + * @param predicate condition to match + * @return iterator over shards matching the predicate for the specific indices + */ + public ShardsIterator allShardsSatisfyingPredicate(String[] indices, Predicate predicate) { + return allShardsSatisfyingPredicate(indices, predicate, false); + } + private ShardsIterator allShardsSatisfyingPredicate( String[] indices, Predicate predicate, diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java index 06f1d5f46f90b..f3e66bd20cd86 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java @@ -76,6 +76,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC mergeRequest.maxNumSegments(request.paramAsInt("max_num_segments", mergeRequest.maxNumSegments())); mergeRequest.onlyExpungeDeletes(request.paramAsBoolean("only_expunge_deletes", mergeRequest.onlyExpungeDeletes())); mergeRequest.flush(request.paramAsBoolean("flush", mergeRequest.flush())); + mergeRequest.primaryOnly(request.paramAsBoolean("primary_only", mergeRequest.primaryOnly())); if (mergeRequest.onlyExpungeDeletes() && mergeRequest.maxNumSegments() != ForceMergeRequest.Defaults.MAX_NUM_SEGMENTS) { deprecationLogger.deprecate( "force_merge_expunge_deletes_and_max_num_segments_deprecation", diff --git a/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java index 87ba6110447c1..430d600354e3b 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java @@ -31,21 +31,136 @@ package org.opensearch.action.admin.indices.forcemerge; +import org.opensearch.Version; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.VersionUtils; public class ForceMergeRequestTests extends OpenSearchTestCase { public void testDescription() { ForceMergeRequest 
request = new ForceMergeRequest(); - assertEquals("Force-merge indices [], maxSegments[-1], onlyExpungeDeletes[false], flush[true]", request.getDescription()); + assertEquals( + "Force-merge indices [], maxSegments[-1], onlyExpungeDeletes[false], flush[true], primaryOnly[false]", + request.getDescription() + ); request = new ForceMergeRequest("shop", "blog"); - assertEquals("Force-merge indices [shop, blog], maxSegments[-1], onlyExpungeDeletes[false], flush[true]", request.getDescription()); + assertEquals( + "Force-merge indices [shop, blog], maxSegments[-1], onlyExpungeDeletes[false], flush[true], primaryOnly[false]", + request.getDescription() + ); request = new ForceMergeRequest(); request.maxNumSegments(12); request.onlyExpungeDeletes(true); request.flush(false); - assertEquals("Force-merge indices [], maxSegments[12], onlyExpungeDeletes[true], flush[false]", request.getDescription()); + request.primaryOnly(true); + assertEquals( + "Force-merge indices [], maxSegments[12], onlyExpungeDeletes[true], flush[false], primaryOnly[true]", + request.getDescription() + ); + } + + public void testToString() { + ForceMergeRequest request = new ForceMergeRequest(); + assertEquals("ForceMergeRequest{maxNumSegments=-1, onlyExpungeDeletes=false, flush=true, primaryOnly=false}", request.toString()); + + request = new ForceMergeRequest(); + request.maxNumSegments(12); + request.onlyExpungeDeletes(true); + request.flush(false); + request.primaryOnly(true); + assertEquals("ForceMergeRequest{maxNumSegments=12, onlyExpungeDeletes=true, flush=false, primaryOnly=true}", request.toString()); + } + + public void testSerialization() throws Exception { + final ForceMergeRequest request = randomRequest(); + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + + final ForceMergeRequest deserializedRequest; + try (StreamInput in = out.bytes().streamInput()) { + deserializedRequest = new ForceMergeRequest(in); + } + assertEquals(request.maxNumSegments(), deserializedRequest.maxNumSegments()); + assertEquals(request.onlyExpungeDeletes(), deserializedRequest.onlyExpungeDeletes()); + assertEquals(request.flush(), deserializedRequest.flush()); + assertEquals(request.primaryOnly(), deserializedRequest.primaryOnly()); + assertEquals(request.forceMergeUUID(), deserializedRequest.forceMergeUUID()); + } + } + + public void testBwcSerialization() throws Exception { + { + final ForceMergeRequest sample = randomRequest(); + final Version compatibleVersion = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(compatibleVersion); + sample.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + in.setVersion(Version.CURRENT); + TaskId.readFromStream(in); + in.readStringArray(); + IndicesOptions.readIndicesOptions(in); + int maxNumSegments = in.readInt(); + boolean onlyExpungeDeletes = in.readBoolean(); + boolean flush = in.readBoolean(); + String forceMergeUUID = in.readOptionalString(); + assertEquals(sample.maxNumSegments(), maxNumSegments); + assertEquals(sample.onlyExpungeDeletes(), onlyExpungeDeletes); + assertEquals(sample.flush(), flush); + assertEquals(sample.forceMergeUUID(), forceMergeUUID); + } + + } + } + + { + final ForceMergeRequest sample = randomRequest(); + final Version compatibleVersion = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(Version.CURRENT); + sample.getParentTask().writeTo(out); 
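+ // Hand-roll the wire format field by field so the stream mimics a writer
+ // that includes primaryOnly only when compatibleVersion is 2.13.0 or later.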
+ out.writeStringArray(sample.indices()); + sample.indicesOptions().writeIndicesOptions(out); + out.writeInt(sample.maxNumSegments()); + out.writeBoolean(sample.onlyExpungeDeletes()); + out.writeBoolean(sample.flush()); + if (compatibleVersion.onOrAfter(Version.V_2_13_0)) { + out.writeBoolean(sample.primaryOnly()); + } + out.writeOptionalString(sample.forceMergeUUID()); + + final ForceMergeRequest deserializedRequest; + try (StreamInput in = out.bytes().streamInput()) { + in.setVersion(compatibleVersion); + deserializedRequest = new ForceMergeRequest(in); + } + + assertEquals(sample.maxNumSegments(), deserializedRequest.maxNumSegments()); + assertEquals(sample.onlyExpungeDeletes(), deserializedRequest.onlyExpungeDeletes()); + assertEquals(sample.flush(), deserializedRequest.flush()); + if (compatibleVersion.onOrAfter(Version.V_2_13_0)) { + assertEquals(sample.primaryOnly(), deserializedRequest.primaryOnly()); + } + assertEquals(sample.forceMergeUUID(), deserializedRequest.forceMergeUUID()); + } + } + } + + private ForceMergeRequest randomRequest() { + ForceMergeRequest request = new ForceMergeRequest(); + if (randomBoolean()) { + request.maxNumSegments(randomIntBetween(1, 10)); + } + request.onlyExpungeDeletes(true); + request.flush(randomBoolean()); + request.primaryOnly(randomBoolean()); + return request; } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java index 8542ff53c6ff1..97283f561d6d4 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java @@ -279,6 +279,31 @@ public void testAllShardsMatchingPredicate() { ); } + public void testAllShardsMatchingPredicateWithSpecificIndices() { + MockAllocationService allocation = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator()); + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) + .put(IndexMetadata.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) + .build(); + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(RoutingTable.builder().addAsNew(metadata.index("test1")).addAsNew(metadata.index("test2")).build()) + .build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .build(); + clusterState = allocation.reroute(clusterState, "reroute"); + + String[] indices = new String[] { "test1", "test2" }; + // Verifies against all primary shards on the node + assertThat(clusterState.routingTable().allShardsSatisfyingPredicate(indices, ShardRouting::primary).size(), is(2)); + // Verifies against all replica shards on the node + assertThat( + clusterState.routingTable().allShardsSatisfyingPredicate(indices, shardRouting -> !shardRouting.primary()).size(), + is(2) + ); + } + public void testActivePrimaryShardsGrouped() { assertThat(this.emptyRoutingTable.activePrimaryShardsGrouped(new String[0], true).size(), is(0)); assertThat(this.emptyRoutingTable.activePrimaryShardsGrouped(new String[0], false).size(), is(0)); From 1f8d0ecbf80110f1084470c742c5e7262b37c5f7 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 13 Mar 2024 16:51:54 -0400 Subject: [PATCH 
09/11] Bump reactor and reactor-netty dependencies (#12633) (#12636) Signed-off-by: Andriy Redko (cherry picked from commit 949f316e0d89076f8323593885bb5f9fb8c28f98) --- CHANGELOG.md | 2 ++ buildSrc/version.properties | 4 ++-- .../licenses/reactor-netty-core-1.1.15.jar.sha1 | 1 - .../licenses/reactor-netty-core-1.1.17.jar.sha1 | 1 + .../licenses/reactor-netty-http-1.1.15.jar.sha1 | 1 - .../licenses/reactor-netty-http-1.1.17.jar.sha1 | 1 + .../licenses/reactor-netty-core-1.1.15.jar.sha1 | 1 - .../licenses/reactor-netty-core-1.1.17.jar.sha1 | 1 + .../licenses/reactor-netty-http-1.1.15.jar.sha1 | 1 - .../licenses/reactor-netty-http-1.1.17.jar.sha1 | 1 + server/licenses/reactor-core-3.5.14.jar.sha1 | 1 - server/licenses/reactor-core-3.5.15.jar.sha1 | 1 + 12 files changed, 9 insertions(+), 7 deletions(-) delete mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.1.15.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.1.15.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.15.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.15.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 delete mode 100644 server/licenses/reactor-core-3.5.14.jar.sha1 create mode 100644 server/licenses/reactor-core-3.5.15.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 48f2dae928704..fe052c788535a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.zookeeper:zookeeper` from 3.9.1 to 3.9.2 ([#12580](https://github.com/opensearch-project/OpenSearch/pull/12580)) - Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#12579](https://github.com/opensearch-project/OpenSearch/pull/12579)) - Bump Jackson version from 2.16.1 to 2.16.2 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611)) +- Bump `reactor-netty` from 1.1.15 to 1.1.17 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) +- Bump `reactor` from 3.5.14 to 3.5.15 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) ### Changed - Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index e7e995fbbb310..060f8435a4d9a 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -30,8 +30,8 @@ netty = 4.1.107.Final joda = 2.12.2 # project reactor -reactor_netty = 1.1.15 -reactor = 3.5.14 +reactor_netty = 1.1.17 +reactor = 3.5.15 # client dependencies httpclient = 4.5.14 diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.15.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.15.jar.sha1 deleted file mode 100644 index c30a99a2338b4..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3221d405ad55a573cf29875a8244a4217cf07185 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 
b/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..3d631bc904f24 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 @@ -0,0 +1 @@ +319b1d41f28e92b31b7ca0f19183337f5539bb44 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.15.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.15.jar.sha1 deleted file mode 100644 index ab3171cd02b73..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c79756fa2dfc28ac81fc9d23a14b17c656c3e560 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..9ceef6959744b --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 @@ -0,0 +1 @@ +9ed949dcd050ef30d9eeedd53d95d1dce20ce832 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.15.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.15.jar.sha1 deleted file mode 100644 index c30a99a2338b4..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3221d405ad55a573cf29875a8244a4217cf07185 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..3d631bc904f24 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 @@ -0,0 +1 @@ +319b1d41f28e92b31b7ca0f19183337f5539bb44 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.15.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.15.jar.sha1 deleted file mode 100644 index ab3171cd02b73..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c79756fa2dfc28ac81fc9d23a14b17c656c3e560 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..9ceef6959744b --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 @@ -0,0 +1 @@ +9ed949dcd050ef30d9eeedd53d95d1dce20ce832 \ No newline at end of file diff --git a/server/licenses/reactor-core-3.5.14.jar.sha1 b/server/licenses/reactor-core-3.5.14.jar.sha1 deleted file mode 100644 index 3b58e7a68bade..0000000000000 --- a/server/licenses/reactor-core-3.5.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6e0c97c2e78273a00fd4ed38016b19ff3c6de59e \ No newline at end of file diff --git a/server/licenses/reactor-core-3.5.15.jar.sha1 b/server/licenses/reactor-core-3.5.15.jar.sha1 new file mode 100644 index 0000000000000..02df47ed58b9d --- /dev/null +++ b/server/licenses/reactor-core-3.5.15.jar.sha1 @@ -0,0 +1 @@ +4e07a24c671235a2a806e75e9b8ff23d7d1db3d4 \ No newline at end of file From b1c8629df16115af0c219b44e4bbc65abe5633dc Mon Sep 17 00:00:00 2001 From: Sandesh Kumar Date: Wed, 13 Mar 2024 14:32:16 -0700 Subject: [PATCH 10/11] [Backport 2.x] Quickly compute terms aggregations when the top-level query is 
functionally match-all for a segment (#12629) Quickly compute terms aggregations when the top-level query is functionally match-all for a segment (#11643) --------- Signed-off-by: Sandesh Kumar --- CHANGELOG.md | 1 + .../GlobalOrdinalsStringTermsAggregator.java | 113 +++++++++- .../aggregations/support/ValuesSource.java | 4 + .../search/internal/ContextIndexSearcher.java | 5 + .../terms/KeywordTermsAggregatorTests.java | 76 ++++--- .../bucket/terms/TermsAggregatorTests.java | 198 ++++++++++++++---- .../search/query/QueryPhaseTests.java | 28 ++- .../aggregations/AggregatorTestCase.java | 97 ++++++++- 8 files changed, 439 insertions(+), 83 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe052c788535a..66c7f5a4020bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Changed - Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499)) +- Quickly compute terms aggregations when the top-level query is functionally match-all for a segment ([#11643](https://github.com/opensearch-project/OpenSearch/pull/11643)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 5ed899408ab40..69fda2f3f6133 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -35,8 +35,13 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.PriorityQueue; @@ -46,6 +51,7 @@ import org.opensearch.common.util.LongHash; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.AggregationExecutionException; import org.opensearch.search.aggregations.Aggregator; @@ -73,6 +79,7 @@ import static org.opensearch.search.aggregations.InternalOrder.isKeyOrder; import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS; +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; /** * An aggregator of string values that relies on global ordinals in order to build buckets. 
@@ -85,6 +92,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr private final LongPredicate acceptedGlobalOrdinals; private final long valueCount; + private final String fieldName; + private Weight weight; private final GlobalOrdLookupFunction lookupGlobalOrd; protected final CollectionStrategy collectionStrategy; protected int segmentsWithSingleValuedOrds = 0; @@ -136,16 +145,105 @@ public GlobalOrdinalsStringTermsAggregator( return new DenseGlobalOrds(); }); } + this.fieldName = (valuesSource instanceof ValuesSource.Bytes.WithOrdinals.FieldData) + ? ((ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource).getIndexFieldName() + : null; } String descriptCollectionStrategy() { return collectionStrategy.describe(); } + public void setWeight(Weight weight) { + this.weight = weight; + } + + /** + Read doc frequencies directly from indexed terms in the segment to skip iterating through individual documents + @param ctx The LeafReaderContext to collect terms from + @param globalOrds The SortedSetDocValues for the field's ordinals + @param ordCountConsumer A consumer to accept collected term frequencies + @return A LeafBucketCollector implementation with collection termination, since collection is complete + @throws IOException If an I/O error occurs during reading + */ + LeafBucketCollector termDocFreqCollector( + LeafReaderContext ctx, + SortedSetDocValues globalOrds, + BiConsumer ordCountConsumer + ) throws IOException { + if (weight == null) { + // Weight not assigned - cannot use this optimization + return null; + } else { + if (weight.count(ctx) == 0) { + // No documents matches top level query on this segment, we can skip the segment entirely + return LeafBucketCollector.NO_OP_COLLECTOR; + } else if (weight.count(ctx) != ctx.reader().maxDoc()) { + // weight.count(ctx) == ctx.reader().maxDoc() implies there are no deleted documents and + // top-level query matches all docs in the segment + return null; + } + } + + Terms segmentTerms = ctx.reader().terms(this.fieldName); + if (segmentTerms == null) { + // Field is not indexed. + return null; + } + + NumericDocValues docCountValues = DocValues.getNumeric(ctx.reader(), DocCountFieldMapper.NAME); + if (docCountValues.nextDoc() != NO_MORE_DOCS) { + // This segment has at least one document with the _doc_count field. + return null; + } + + TermsEnum indexTermsEnum = segmentTerms.iterator(); + BytesRef indexTerm = indexTermsEnum.next(); + TermsEnum globalOrdinalTermsEnum = globalOrds.termsEnum(); + BytesRef ordinalTerm = globalOrdinalTermsEnum.next(); + + // Iterate over the terms in the segment, look for matches in the global ordinal terms, + // and increment bucket count when segment terms match global ordinal terms. 
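+ // Both enums walk their terms in BytesRef sort order, so a two-pointer
+ // sorted merge suffices: advance the lagging side, consume docFreq on equality.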
+ while (indexTerm != null && ordinalTerm != null) { + int compare = indexTerm.compareTo(ordinalTerm); + if (compare == 0) { + if (acceptedGlobalOrdinals.test(globalOrdinalTermsEnum.ord())) { + ordCountConsumer.accept(globalOrdinalTermsEnum.ord(), indexTermsEnum.docFreq()); + } + indexTerm = indexTermsEnum.next(); + ordinalTerm = globalOrdinalTermsEnum.next(); + } else if (compare < 0) { + indexTerm = indexTermsEnum.next(); + } else { + ordinalTerm = globalOrdinalTermsEnum.next(); + } + } + return new LeafBucketCollector() { + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { + throw new CollectionTerminatedException(); + } + }; + } + @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { SortedSetDocValues globalOrds = valuesSource.globalOrdinalsValues(ctx); collectionStrategy.globalOrdsReady(globalOrds); + + if (collectionStrategy instanceof DenseGlobalOrds + && this.resultStrategy instanceof StandardTermsResults + && sub == LeafBucketCollector.NO_OP_COLLECTOR) { + LeafBucketCollector termDocFreqCollector = termDocFreqCollector( + ctx, + globalOrds, + (ord, docCount) -> incrementBucketDocCount(collectionStrategy.globalOrdToBucketOrd(0, ord), docCount) + ); + if (termDocFreqCollector != null) { + return termDocFreqCollector; + } + } + SortedDocValues singleValues = DocValues.unwrapSingleton(globalOrds); if (singleValues != null) { segmentsWithSingleValuedOrds++; @@ -343,9 +441,20 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol final SortedSetDocValues segmentOrds = valuesSource.ordinalsValues(ctx); segmentDocCounts = context.bigArrays().grow(segmentDocCounts, 1 + segmentOrds.getValueCount()); assert sub == LeafBucketCollector.NO_OP_COLLECTOR; - final SortedDocValues singleValues = DocValues.unwrapSingleton(segmentOrds); mapping = valuesSource.globalOrdinalsMapping(ctx); - // Dense mode doesn't support include/exclude so we don't have to check it here. 
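+ // The doc-frequency shortcut applies to the low-cardinality path as well;
+ // counts are keyed by segment ordinal here, so each ordinal is translated
+ // to its global ordinal via `mapping` before the bucket count is bumped.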
+ + if (this.resultStrategy instanceof StandardTermsResults) { + LeafBucketCollector termDocFreqCollector = this.termDocFreqCollector( + ctx, + segmentOrds, + (ord, docCount) -> incrementBucketDocCount(mapping.applyAsLong(ord), docCount) + ); + if (termDocFreqCollector != null) { + return termDocFreqCollector; + } + } + + final SortedDocValues singleValues = DocValues.unwrapSingleton(segmentOrds); if (singleValues != null) { segmentsWithSingleValuedOrds++; return resultStrategy.wrapCollector(new LeafBucketCollectorBase(sub, segmentOrds) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java index 3ce1f0447dfcc..1f4dd429e094e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java @@ -244,6 +244,10 @@ public FieldData(IndexOrdinalsFieldData indexFieldData) { this.indexFieldData = indexFieldData; } + public String getIndexFieldName() { + return this.indexFieldData.getFieldName(); + } + @Override public SortedBinaryDocValues bytesValues(LeafReaderContext context) { final LeafOrdinalsFieldData atomicFieldData = indexFieldData.load(context); diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java index 403b0b545c113..ec3ed2332d0b8 100644 --- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java @@ -387,6 +387,11 @@ public BulkScorer bulkScorer(LeafReaderContext context) throws IOException { return null; } } + + @Override + public int count(LeafReaderContext context) throws IOException { + return weight.count(context); + } }; } else { return weight; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java index 4229361aa7f46..753644dce81d5 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.terms; import org.apache.lucene.document.Document; -import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.IndexSearcher; @@ -41,7 +40,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; -import org.apache.lucene.util.BytesRef; +import org.opensearch.common.TriConsumer; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.aggregations.AggregatorTestCase; @@ -57,6 +56,8 @@ public class KeywordTermsAggregatorTests extends AggregatorTestCase { private static final String KEYWORD_FIELD = "keyword"; + private static final Consumer CONFIGURE_KEYWORD_FIELD = agg -> agg.field(KEYWORD_FIELD); + private static final List dataset; static { List d = new ArrayList<>(45); @@ -68,51 +69,63 @@ public class KeywordTermsAggregatorTests extends AggregatorTestCase { 
dataset = d; } + private static final Consumer VERIFY_MATCH_ALL_DOCS = agg -> { + assertEquals(9, agg.getBuckets().size()); + for (int i = 0; i < 9; i++) { + StringTerms.Bucket bucket = (StringTerms.Bucket) agg.getBuckets().get(i); + assertThat(bucket.getKey(), equalTo(String.valueOf(9L - i))); + assertThat(bucket.getDocCount(), equalTo(9L - i)); + } + }; + + private static final Consumer VERIFY_MATCH_NO_DOCS = agg -> { assertEquals(0, agg.getBuckets().size()); }; + + private static final Query MATCH_ALL_DOCS_QUERY = new MatchAllDocsQuery(); + + private static final Query MATCH_NO_DOCS_QUERY = new MatchNoDocsQuery(); + public void testMatchNoDocs() throws IOException { testSearchCase( - new MatchNoDocsQuery(), + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_NO_DOCS_QUERY, dataset, - aggregation -> aggregation.field(KEYWORD_FIELD), - agg -> assertEquals(0, agg.getBuckets().size()), - null // without type hint + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_NO_DOCS, + null // without type hint ); testSearchCase( - new MatchNoDocsQuery(), + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_NO_DOCS_QUERY, dataset, - aggregation -> aggregation.field(KEYWORD_FIELD), - agg -> assertEquals(0, agg.getBuckets().size()), - ValueType.STRING // with type hint + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_NO_DOCS, + ValueType.STRING // with type hint ); } public void testMatchAllDocs() throws IOException { - Query query = new MatchAllDocsQuery(); - - testSearchCase(query, dataset, aggregation -> aggregation.field(KEYWORD_FIELD), agg -> { - assertEquals(9, agg.getBuckets().size()); - for (int i = 0; i < 9; i++) { - StringTerms.Bucket bucket = (StringTerms.Bucket) agg.getBuckets().get(i); - assertThat(bucket.getKey(), equalTo(String.valueOf(9L - i))); - assertThat(bucket.getDocCount(), equalTo(9L - i)); - } - }, - null // without type hint + testSearchCase( + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_ALL_DOCS_QUERY, + dataset, + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_ALL_DOCS, + null // without type hint ); - testSearchCase(query, dataset, aggregation -> aggregation.field(KEYWORD_FIELD), agg -> { - assertEquals(9, agg.getBuckets().size()); - for (int i = 0; i < 9; i++) { - StringTerms.Bucket bucket = (StringTerms.Bucket) agg.getBuckets().get(i); - assertThat(bucket.getKey(), equalTo(String.valueOf(9L - i))); - assertThat(bucket.getDocCount(), equalTo(9L - i)); - } - }, - ValueType.STRING // with type hint + testSearchCase( + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_ALL_DOCS_QUERY, + dataset, + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_ALL_DOCS, + ValueType.STRING // with type hint ); } private void testSearchCase( + TriConsumer addField, Query query, List dataset, Consumer configure, @@ -123,7 +136,7 @@ private void testSearchCase( try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { Document document = new Document(); for (String value : dataset) { - document.add(new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef(value))); + addField.apply(document, KEYWORD_FIELD, value); indexWriter.addDocument(document); document.clear(); } @@ -147,5 +160,4 @@ private void testSearchCase( } } } - } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 80744ecde4d69..6d105c27a692f 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ 
b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -44,6 +44,8 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -52,6 +54,7 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.TriConsumer; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.settings.Settings; @@ -120,6 +123,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; @@ -136,9 +140,6 @@ import static org.mockito.Mockito.when; public class TermsAggregatorTests extends AggregatorTestCase { - - private boolean randomizeAggregatorImpl = true; - // Constants for a script that returns a string private static final String STRING_SCRIPT_NAME = "string_script"; private static final String STRING_SCRIPT_OUTPUT = "Orange"; @@ -171,9 +172,22 @@ protected ScriptService getMockScriptService() { return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); } + protected CountingAggregator createCountingAggregator( + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + boolean randomizeAggregatorImpl, + MappedFieldType... fieldTypes + ) throws IOException { + return new CountingAggregator( + new AtomicInteger(), + createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldTypes) + ); + } + protected A createAggregator( AggregationBuilder aggregationBuilder, IndexSearcher indexSearcher, + boolean randomizeAggregatorImpl, MappedFieldType... fieldTypes ) throws IOException { try { @@ -188,6 +202,14 @@ protected A createAggregator( } } + protected A createAggregator( + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + MappedFieldType... 
fieldTypes + ) throws IOException { + return createAggregator(aggregationBuilder, indexSearcher, true, fieldTypes); + } + @Override protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldType, String fieldName) { return new TermsAggregationBuilder("foo").field(fieldName); @@ -207,8 +229,7 @@ protected List getSupportedValuesSourceTypes() { } public void testUsesGlobalOrdinalsByDefault() throws Exception { - randomizeAggregatorImpl = false; - + boolean randomizeAggregatorImpl = false; Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); indexWriter.close(); @@ -220,35 +241,35 @@ public void testUsesGlobalOrdinalsByDefault() throws Exception { .field("string"); MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string"); - TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); GlobalOrdinalsStringTermsAggregator globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.descriptCollectionStrategy(), equalTo("dense")); // Infers depth_first because the maxOrd is 0 which is less than the size aggregationBuilder.subAggregation(AggregationBuilders.cardinality("card").field("string")); - aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.collectMode, equalTo(Aggregator.SubAggCollectionMode.DEPTH_FIRST)); assertThat(globalAgg.descriptCollectionStrategy(), equalTo("remap")); aggregationBuilder.collectMode(Aggregator.SubAggCollectionMode.DEPTH_FIRST); - aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.collectMode, equalTo(Aggregator.SubAggCollectionMode.DEPTH_FIRST)); assertThat(globalAgg.descriptCollectionStrategy(), equalTo("remap")); aggregationBuilder.collectMode(Aggregator.SubAggCollectionMode.BREADTH_FIRST); - aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.collectMode, equalTo(Aggregator.SubAggCollectionMode.BREADTH_FIRST)); assertThat(globalAgg.descriptCollectionStrategy(), equalTo("dense")); aggregationBuilder.order(BucketOrder.aggregation("card", true)); - aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.descriptCollectionStrategy(), equalTo("remap")); @@ -257,51 +278,145 @@ public 
void testUsesGlobalOrdinalsByDefault() throws Exception { directory.close(); } - public void testSimple() throws Exception { + /** + * This test case utilizes the default implementation of GlobalOrdinalsStringTermsAggregator since collectSegmentOrds is false + */ + public void testSimpleAggregation() throws Exception { + // Fields not indexed: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_NOT_INDEXED, false, false, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, deleted documents in segment: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, true, false, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, no deleted documents in segment: will use LeafBucketCollector#termDocFreqCollector - no documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, false, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 0); + + // Fields indexed, no deleted documents, but a _doc_count field present in one document: + // cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, true, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + } + + /** + * This test case utilizes the LowCardinality implementation of GlobalOrdinalsStringTermsAggregator since collectSegmentOrds is true + */ + public void testSimpleAggregationLowCardinality() throws Exception { + // Fields not indexed: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_NOT_INDEXED, false, false, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, deleted documents in segment: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, true, false, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, no deleted documents in segment: will use LeafBucketCollector#termDocFreqCollector - no documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, false, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 0); + + // Fields indexed, no deleted documents, but a _doc_count field present in one document: + // cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, true, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + } + + /** + * This test case utilizes the MapStringTermsAggregator.
+ */ + public void testSimpleMapStringAggregation() throws Exception { + testSimple( + ADD_SORTED_SET_FIELD_INDEXED, + randomBoolean(), + randomBoolean(), + randomBoolean(), + TermsAggregatorFactory.ExecutionMode.MAP, + 4 + ); + } + + /** + * A utility method to test string terms aggregation + * @param addFieldConsumer a function that determines how a field is added to the document + * @param includeDeletedDocumentsInSegment whether to include deleted documents in the segment + * @param includeDocCountField whether to add a _doc_count field to one of the documents + * @param collectSegmentOrds whether to collect segment ords - set true to utilize the LowCardinality implementation of GlobalOrdinalsStringTermsAggregator + * @param executionMode execution mode, MAP or GLOBAL_ORDINALS + * @param expectedCollectCount expected number of documents visited as part of collect() invocation + */ + private void testSimple( + TriConsumer addFieldConsumer, + final boolean includeDeletedDocumentsInSegment, + final boolean includeDocCountField, + boolean collectSegmentOrds, + TermsAggregatorFactory.ExecutionMode executionMode, + final int expectedCollectCount + ) throws Exception { try (Directory directory = newDirectory()) { - try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + try ( + RandomIndexWriter indexWriter = new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { Document document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef("a"))); - document.add(new SortedSetDocValuesField("string", new BytesRef("b"))); + addFieldConsumer.apply(document, "string", "a"); + addFieldConsumer.apply(document, "string", "b"); indexWriter.addDocument(document); document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef(""))); - document.add(new SortedSetDocValuesField("string", new BytesRef("c"))); - document.add(new SortedSetDocValuesField("string", new BytesRef("a"))); + addFieldConsumer.apply(document, "string", ""); + addFieldConsumer.apply(document, "string", "c"); + addFieldConsumer.apply(document, "string", "a"); indexWriter.addDocument(document); document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef("b"))); - document.add(new SortedSetDocValuesField("string", new BytesRef("d"))); + addFieldConsumer.apply(document, "string", "b"); + addFieldConsumer.apply(document, "string", "d"); indexWriter.addDocument(document); document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef(""))); + addFieldConsumer.apply(document, "string", ""); + if (includeDocCountField) { + // Adding _doc_count to one document + document.add(new NumericDocValuesField("_doc_count", 10)); + } indexWriter.addDocument(document); + + if (includeDeletedDocumentsInSegment) { + document = new Document(); + ADD_SORTED_SET_FIELD_INDEXED.apply(document, "string", "e"); + indexWriter.addDocument(document); + indexWriter.deleteDocuments(new Term("string", "e")); + assertEquals(5, indexWriter.getDocStats().maxDoc); // deleted document still in segment + } + try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) { IndexSearcher indexSearcher = newIndexSearcher(indexReader); - for (TermsAggregatorFactory.ExecutionMode executionMode : TermsAggregatorFactory.ExecutionMode.values()) { - TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").userValueTypeHint( - ValueType.STRING -
).executionHint(executionMode.toString()).field("string").order(BucketOrder.key(true)); - MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string"); - TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); - aggregator.preCollection(); - indexSearcher.search(new MatchAllDocsQuery(), aggregator); - aggregator.postCollection(); - Terms result = reduce(aggregator); - assertEquals(5, result.getBuckets().size()); - assertEquals("", result.getBuckets().get(0).getKeyAsString()); + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").userValueTypeHint(ValueType.STRING) + .executionHint(executionMode.toString()) + .field("string") + .order(BucketOrder.key(true)); + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string"); + + TermsAggregatorFactory.COLLECT_SEGMENT_ORDS = collectSegmentOrds; + TermsAggregatorFactory.REMAP_GLOBAL_ORDS = false; + CountingAggregator aggregator = createCountingAggregator(aggregationBuilder, indexSearcher, false, fieldType); + + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + Terms result = reduce(aggregator); + assertEquals(5, result.getBuckets().size()); + assertEquals("", result.getBuckets().get(0).getKeyAsString()); + if (includeDocCountField) { + assertEquals(11L, result.getBuckets().get(0).getDocCount()); + } else { assertEquals(2L, result.getBuckets().get(0).getDocCount()); - assertEquals("a", result.getBuckets().get(1).getKeyAsString()); - assertEquals(2L, result.getBuckets().get(1).getDocCount()); - assertEquals("b", result.getBuckets().get(2).getKeyAsString()); - assertEquals(2L, result.getBuckets().get(2).getDocCount()); - assertEquals("c", result.getBuckets().get(3).getKeyAsString()); - assertEquals(1L, result.getBuckets().get(3).getDocCount()); - assertEquals("d", result.getBuckets().get(4).getKeyAsString()); - assertEquals(1L, result.getBuckets().get(4).getDocCount()); - assertTrue(AggregationInspectionHelper.hasValue((InternalTerms) result)); } + assertEquals("a", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + assertEquals("b", result.getBuckets().get(2).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(2).getDocCount()); + assertEquals("c", result.getBuckets().get(3).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(3).getDocCount()); + assertEquals("d", result.getBuckets().get(4).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(4).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms) result)); + + assertEquals(expectedCollectCount, aggregator.getCollectCount().get()); } } } @@ -1543,5 +1658,4 @@ private T reduce(Aggregator agg) throws IOExcept doAssertReducedMultiBucketConsumer(result, reduceBucketConsumer); return result; } - } diff --git a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index d0e01c5461c79..4bd4d406e4391 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -122,6 +122,7 @@ import java.util.concurrent.TimeUnit; import static org.opensearch.search.query.TopDocsCollectorContext.hasInfMaxScore; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import 
static org.hamcrest.Matchers.greaterThan; @@ -437,10 +438,16 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + // Do not expect an exact match when terminate_after is used in conjunction with size = 0, since an optimization introduced by + // https://issues.apache.org/jira/browse/LUCENE-10620 can produce a total hit count >= terminate_after, because + // TotalHitCountCollector is used in this case as part of the Weight#count() optimization context.setSize(0); QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(1L), lessThanOrEqualTo((long) numDocs)) + ); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } @@ -466,7 +473,10 @@ public void testTerminateAfterEarlyTermination() throws Exception { context.parsedQuery(new ParsedQuery(bq)); QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(1L), lessThanOrEqualTo((long) numDocs)) + ); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } { @@ -486,9 +496,12 @@ public void testTerminateAfterEarlyTermination() throws Exception { context.queryCollectorManagers().put(TotalHitCountCollector.class, manager); QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(1L), lessThanOrEqualTo((long) numDocs)) + ); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); - assertThat(manager.getTotalHits(), equalTo(1)); + assertThat(manager.getTotalHits(), allOf(greaterThanOrEqualTo(1), lessThanOrEqualTo(numDocs))); } // tests with trackTotalHits and terminateAfter @@ -503,7 +516,10 @@ public void testTerminateAfterEarlyTermination() throws Exception { if (trackTotalHits == -1) { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L)); } else { - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) Math.min(trackTotalHits, 10))); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(Math.min(trackTotalHits, 10L)), lessThanOrEqualTo((long) numDocs)) + ); } assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); // The concurrent search terminates the collection when the number of hits is reached by each // slice (the unit of concurrency). To address that, we have to use the shared global state, // much as HitsThresholdChecker does.
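// Note: a minimal sketch, not part of this patch, of the bounded-hit-count check the
// assertions above now apply; the helper name is hypothetical and assumes the hamcrest
// matchers imported in this test. Because the Weight#count() optimization (LUCENE-10620)
// may return an exact count instead of stopping at terminate_after, any total in
// [expectedMin, numDocs] is acceptable once terminatedEarly() is true.
private static void assertBoundedTotalHits(long totalHits, long expectedMin, long numDocs) {
    assertThat(totalHits, allOf(greaterThanOrEqualTo(expectedMin), lessThanOrEqualTo(numDocs)));
}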
if (executor == null) { - assertThat(manager.getTotalHits(), equalTo(10)); + assertThat(manager.getTotalHits(), allOf(greaterThanOrEqualTo(Math.min(trackTotalHits, 10)), lessThanOrEqualTo(numDocs))); } } diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index ac0447dbebf7e..4eb49ebb42241 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -34,11 +34,13 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StoredField; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.CompositeReaderContext; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; @@ -62,6 +64,7 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.TriConsumer; import org.opensearch.common.TriFunction; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; @@ -121,6 +124,7 @@ import org.opensearch.search.aggregations.AggregatorFactories.Builder; import org.opensearch.search.aggregations.MultiBucketConsumerService.MultiBucketConsumer; import org.opensearch.search.aggregations.bucket.nested.NestedAggregationBuilder; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregator; import org.opensearch.search.aggregations.metrics.MetricsAggregator; import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree; @@ -147,6 +151,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; @@ -178,6 +183,14 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase { // A list of field types that should not be tested, or are not currently supported private static List TYPE_TEST_DENYLIST; + protected static final TriConsumer ADD_SORTED_SET_FIELD_NOT_INDEXED = (document, field, value) -> document + .add(new SortedSetDocValuesField(field, new BytesRef(value))); + + protected static final TriConsumer ADD_SORTED_SET_FIELD_INDEXED = (document, field, value) -> { + document.add(new SortedSetDocValuesField(field, new BytesRef(value))); + document.add(new StringField(field, value, Field.Store.NO)); + }; + static { List denylist = new ArrayList<>(); denylist.add(ObjectMapper.CONTENT_TYPE); // Cannot aggregate objects @@ -433,7 +446,6 @@ protected QueryShardContext queryShardContextMock( CircuitBreakerService circuitBreakerService, BigArrays bigArrays ) { - return new QueryShardContext( 0, indexSettings, @@ -1096,6 +1108,89 @@ protected void doWriteTo(StreamOutput out) throws IOException { } } + /** + * Wrapper around Aggregator class + * Maintains a count for 
times collect() is invoked - number of documents visited + */ + protected static class CountingAggregator extends Aggregator { + private final AtomicInteger collectCounter; + public final Aggregator delegate; + + public CountingAggregator(AtomicInteger collectCounter, TermsAggregator delegate) { + this.collectCounter = collectCounter; + this.delegate = delegate; + } + + public AtomicInteger getCollectCount() { + return collectCounter; + } + + @Override + public void close() { + delegate.close(); + } + + @Override + public String name() { + return delegate.name(); + } + + @Override + public SearchContext context() { + return delegate.context(); + } + + @Override + public Aggregator parent() { + return delegate.parent(); + } + + @Override + public Aggregator subAggregator(String name) { + return delegate.subAggregator(name); + } + + @Override + public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + return delegate.buildAggregations(owningBucketOrds); + } + + @Override + public InternalAggregation buildEmptyAggregation() { + return delegate.buildEmptyAggregation(); + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException { + return new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + delegate.getLeafCollector(ctx).collect(doc, bucket); + collectCounter.incrementAndGet(); + } + }; + } + + @Override + public ScoreMode scoreMode() { + return delegate.scoreMode(); + } + + @Override + public void preCollection() throws IOException { + delegate.preCollection(); + } + + @Override + public void postCollection() throws IOException { + delegate.postCollection(); + } + + public void setWeight(Weight weight) { + this.delegate.setWeight(weight); + } + } + public static class InternalAggCardinality extends InternalAggregation { private final CardinalityUpperBound cardinality; From a5f335f94b135bf9e7f921f7b3859a019d910f22 Mon Sep 17 00:00:00 2001 From: David Zane <38449481+dzane17@users.noreply.github.com> Date: Wed, 13 Mar 2024 16:43:48 -0500 Subject: [PATCH 11/11] Tracing for deep search path (#12103) (#12646) * Tracing for deep search path Signed-off-by: David Zane * Refactor search phase tracing instrumentation to detach span creation from the SearchRequestOperationsListener Signed-off-by: Andriy Redko * Close WrappedPhase span Signed-off-by: David Zane * Add asserting listener class Signed-off-by: David Zane * Cleanup AbstractSearchAsyncActionTests tests Signed-off-by: Andriy Redko * Cleanup SearchAsyncActionTests tests Signed-off-by: Andriy Redko * Cleanup CanMatchPreFilterSearchPhaseTests tests Signed-off-by: Andriy Redko --------- Signed-off-by: David Zane Signed-off-by: Andriy Redko Co-authored-by: Andriy Redko (cherry picked from commit 77a119319c14b0c1fa45dc85a618bf2429576b1e) --- CHANGELOG.md | 1 + .../telemetry/tracing/SpanContext.java | 15 ++ .../tracing/SpanCreationContext.java | 4 +- .../core/listener/QueryInsightsListener.java | 2 +- .../search/AbstractSearchAsyncAction.java | 23 ++- .../search/CanMatchPreFilterSearchPhase.java | 7 +- .../SearchDfsQueryThenFetchAsyncAction.java | 7 +- .../SearchQueryThenFetchAsyncAction.java | 7 +- .../action/search/SearchRequestContext.java | 2 +- .../SearchRequestOperationsListener.java | 16 +- .../action/search/SearchRequestSlowLog.java | 2 +- .../action/search/SearchRequestStats.java | 2 +- .../action/search/TransportSearchAction.java | 110 ++++++----- .../telemetry/tracing/AttributeNames.java | 15 ++ 
.../telemetry/tracing/SpanBuilder.java | 40 ++++ ...ceableSearchRequestOperationsListener.java | 75 ++++++++ .../AbstractSearchAsyncActionTests.java | 54 ++---- .../CanMatchPreFilterSearchPhaseTests.java | 180 +++++++++--------- .../action/search/SearchAsyncActionTests.java | 57 +++++- .../SearchQueryThenFetchAsyncActionTests.java | 27 ++- ...erationsCompositeListenerFactoryTests.java | 2 +- ...stOperationsListenerAssertingListener.java | 39 ++++ .../SearchRequestOperationsListenerTests.java | 2 +- .../search/SearchRequestStatsTests.java | 4 +- .../snapshots/SnapshotResiliencyTests.java | 3 +- .../telemetry/tracing/SpanBuilderTests.java | 11 ++ 26 files changed, 506 insertions(+), 201 deletions(-) create mode 100644 server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableSearchRequestOperationsListener.java create mode 100644 server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerAssertingListener.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 66c7f5a4020bd..f5c95d071d5d9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add kuromoji_completion analyzer and filter ([#4835](https://github.com/opensearch-project/OpenSearch/issues/4835)) - [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) - The org.opensearch.bootstrap.Security should support codebase for JAR files with classifiers ([#12586](https://github.com/opensearch-project/OpenSearch/issues/12586)) +- Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103)) ### Dependencies - Bump `com.squareup.okio:okio` from 3.7.0 to 3.8.0 ([#12290](https://github.com/opensearch-project/OpenSearch/pull/12290)) diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java index f9af611553aff..e5e62c795e5d0 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java @@ -31,4 +31,19 @@ public SpanContext(Span span) { Span getSpan() { return span; } + + /** + * Sets the error for the current span behind this context + * @param cause error + */ + public void setError(final Exception cause) { + span.setError(cause); + } + + /** + * Ends current span + */ + public void endSpan() { + span.endSpan(); + } } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java index cbbcfe7a85d57..6af7c440f8de9 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java @@ -79,8 +79,8 @@ public SpanCreationContext attributes(Attributes attributes) { } /** - * Sets the parent for spann - * @param parent parent + * Sets the parent for span + * @param parent parent span context * @return spanCreationContext */ public SpanCreationContext parent(SpanContext parent) { diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java 
b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java index 705273f52a567..9ec8673147c38 100644 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java @@ -116,7 +116,7 @@ public void onPhaseStart(SearchPhaseContext context) {} public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} @Override - public void onPhaseFailure(SearchPhaseContext context) {} + public void onPhaseFailure(SearchPhaseContext context, Throwable cause) {} @Override public void onRequestStart(SearchRequestContext searchRequestContext) {} diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 3b99ea9cc1cd2..0520a4a7aecec 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -58,6 +58,10 @@ import org.opensearch.search.internal.SearchContext; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.pipeline.PipelinedRequest; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanCreationContext; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.transport.Transport; import java.util.ArrayDeque; @@ -116,6 +120,7 @@ abstract class AbstractSearchAsyncAction exten private final Map pendingExecutionsPerNode = new ConcurrentHashMap<>(); private final boolean throttleConcurrentRequests; private final SearchRequestContext searchRequestContext; + private final Tracer tracer; private SearchPhase currentPhase; private boolean currentPhaseHasLifecycle; @@ -140,7 +145,8 @@ abstract class AbstractSearchAsyncAction exten SearchPhaseResults resultConsumer, int maxConcurrentRequestsPerNode, SearchResponse.Clusters clusters, - SearchRequestContext searchRequestContext + SearchRequestContext searchRequestContext, + Tracer tracer ) { super(name); final List toSkipIterators = new ArrayList<>(); @@ -177,6 +183,7 @@ abstract class AbstractSearchAsyncAction exten this.results = resultConsumer; this.clusters = clusters; this.searchRequestContext = searchRequestContext; + this.tracer = tracer; } @Override @@ -221,6 +228,7 @@ public final void start() { null ) ); + onRequestEnd(searchRequestContext); return; } executePhase(this); @@ -460,7 +468,8 @@ private void onRequestEnd(SearchRequestContext searchRequestContext) { } private void executePhase(SearchPhase phase) { - try { + Span phaseSpan = tracer.startSpan(SpanCreationContext.server().name("[phase/" + phase.getName() + "]")); + try (final SpanScope scope = tracer.withSpanInScope(phaseSpan)) { onPhaseStart(phase); phase.recordAndRun(); } catch (Exception e) { @@ -468,7 +477,15 @@ private void executePhase(SearchPhase phase) { logger.debug(new ParameterizedMessage("Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), e); } + if (currentPhaseHasLifecycle == false) { + phaseSpan.setError(e); + } + onPhaseFailure(phase, "", e); + } finally { + if (currentPhaseHasLifecycle == false) { + phaseSpan.endSpan(); + } } } @@ -733,7 +750,7 @@ public void sendSearchResponse(InternalSearchResponse internalSearchResponse, At 
@Override public final void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { if (currentPhaseHasLifecycle) { - this.searchRequestContext.getSearchRequestOperationsListener().onPhaseFailure(this); + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseFailure(this, cause); } raisePhaseFailure(new SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures())); } diff --git a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java index c693eea4a2c33..952d83b9e4539 100644 --- a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java @@ -44,6 +44,7 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.MinAndMax; import org.opensearch.search.sort.SortOrder; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.transport.Transport; import java.util.Comparator; @@ -91,7 +92,8 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction, SearchPhase> phaseFactory, SearchResponse.Clusters clusters, - SearchRequestContext searchRequestContext + SearchRequestContext searchRequestContext, + Tracer tracer ) { // We set max concurrent shard requests to the number of shards so no throttling happens for can_match requests super( @@ -112,7 +114,8 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction(shardsIts.size()), request.getMaxConcurrentShardRequests(), clusters, - searchRequestContext + searchRequestContext, + tracer ); this.queryPhaseResultConsumer = queryPhaseResultConsumer; this.searchPhaseController = searchPhaseController; diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java index c26bd5eef8c15..c8ab5fdaf61a1 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -43,6 +43,7 @@ import org.opensearch.search.internal.SearchContext; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.transport.Transport; import java.util.Map; @@ -82,7 +83,8 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction new ParameterizedMessage("onPhaseFailure listener [{}] failed", listener), e); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java index 74e04d976cb1c..a9a07c6aca7f4 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java @@ -140,7 +140,7 @@ protected void onPhaseStart(SearchPhaseContext context) {} protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} @Override - protected void onPhaseFailure(SearchPhaseContext context) {} + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) {} @Override protected void onRequestStart(SearchRequestContext searchRequestContext) {} diff --git 
a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java index ac32b08afb7f6..97ef94055faf7 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -71,7 +71,7 @@ protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searc } @Override - protected void onPhaseFailure(SearchPhaseContext context) { + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); } diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 3d1a25a8aa01f..65cfd35489033 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -88,6 +88,12 @@ import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; +import org.opensearch.telemetry.tracing.listener.TraceableSearchRequestOperationsListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.RemoteClusterAware; import org.opensearch.transport.RemoteClusterService; @@ -173,6 +179,7 @@ public class TransportSearchAction extends HandledTransportAction) SearchRequest::new); this.client = client; @@ -215,6 +223,7 @@ public TransportSearchAction( this.searchRequestOperationsCompositeListenerFactory = searchRequestOperationsCompositeListenerFactory; clusterService.getClusterSettings() .addSettingsUpdateConsumer(SEARCH_QUERY_METRICS_ENABLED_SETTING, this::setSearchQueryMetricsEnabled); + this.tracer = tracer; } private void setSearchQueryMetricsEnabled(boolean searchQueryMetricsEnabled) { @@ -384,7 +393,8 @@ public AbstractSearchAsyncAction asyncSearchAction( new ArraySearchPhaseResults<>(shardsIts.size()), searchRequest.getMaxConcurrentShardRequests(), clusters, - searchRequestContext + searchRequestContext, + tracer ) { @Override protected void executePhaseOnShard( @@ -431,49 +441,58 @@ private void executeRequest( if (originalSearchRequest.isPhaseTook() == null) { originalSearchRequest.setPhaseTook(clusterService.getClusterSettings().get(SEARCH_PHASE_TOOK_ENABLED)); } - SearchRequestOperationsListener.CompositeListener requestOperationsListeners = searchRequestOperationsCompositeListenerFactory - .buildCompositeListener(originalSearchRequest, logger); - SearchRequestContext searchRequestContext = new SearchRequestContext(requestOperationsListeners, originalSearchRequest); - searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext); - - PipelinedRequest searchRequest; - ActionListener listener; - try { - searchRequest = searchPipelineService.resolvePipeline(originalSearchRequest); - listener = searchRequest.transformResponseListener(originalListener); - } catch (Exception e) { - originalListener.onFailure(e); - return; - } - ActionListener requestTransformListener = ActionListener.wrap(sr -> { - if 
(searchQueryMetricsEnabled) { - try { - searchQueryCategorizer.categorize(sr.source()); - } catch (Exception e) { - logger.error("Error while trying to categorize the query.", e); - } + final Span requestSpan = tracer.startSpan(SpanBuilder.from(task, actionName)); + try (final SpanScope spanScope = tracer.withSpanInScope(requestSpan)) { + SearchRequestOperationsListener.CompositeListener requestOperationsListeners; + final ActionListener updatedListener = TraceableActionListener.create(originalListener, requestSpan, tracer); + requestOperationsListeners = searchRequestOperationsCompositeListenerFactory.buildCompositeListener( + originalSearchRequest, + logger, + TraceableSearchRequestOperationsListener.create(tracer, requestSpan) + ); + SearchRequestContext searchRequestContext = new SearchRequestContext(requestOperationsListeners, originalSearchRequest); + searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext); + + PipelinedRequest searchRequest; + ActionListener listener; + try { + searchRequest = searchPipelineService.resolvePipeline(originalSearchRequest); + listener = searchRequest.transformResponseListener(updatedListener); + } catch (Exception e) { + updatedListener.onFailure(e); + return; } - ActionListener rewriteListener = buildRewriteListener( - sr, - task, - timeProvider, - searchAsyncActionProvider, - listener, - searchRequestContext - ); - if (sr.source() == null) { - rewriteListener.onResponse(sr.source()); - } else { - Rewriteable.rewriteAndFetch( - sr.source(), - searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), - rewriteListener + ActionListener requestTransformListener = ActionListener.wrap(sr -> { + if (searchQueryMetricsEnabled) { + try { + searchQueryCategorizer.categorize(sr.source()); + } catch (Exception e) { + logger.error("Error while trying to categorize the query.", e); + } + } + + ActionListener rewriteListener = buildRewriteListener( + sr, + task, + timeProvider, + searchAsyncActionProvider, + listener, + searchRequestContext ); - } - }, listener::onFailure); - searchRequest.transformRequest(requestTransformListener); + if (sr.source() == null) { + rewriteListener.onResponse(sr.source()); + } else { + Rewriteable.rewriteAndFetch( + sr.source(), + searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), + rewriteListener + ); + } + }, listener::onFailure); + searchRequest.transformRequest(requestTransformListener); + } } private ActionListener buildRewriteListener( @@ -1240,7 +1259,8 @@ private AbstractSearchAsyncAction searchAsyncAction ) ), clusters, - searchRequestContext + searchRequestContext, + tracer ); } else { final QueryPhaseResultConsumer queryResultConsumer = searchPhaseController.newSearchPhaseResults( @@ -1271,7 +1291,8 @@ private AbstractSearchAsyncAction searchAsyncAction clusterState, task, clusters, - searchRequestContext + searchRequestContext, + tracer ); break; case QUERY_THEN_FETCH: @@ -1292,7 +1313,8 @@ private AbstractSearchAsyncAction searchAsyncAction clusterState, task, clusters, - searchRequestContext + searchRequestContext, + tracer ); break; default: diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java index 6a97914b04ebc..212ef3c713d8e 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java @@ -75,6 +75,16 @@ private AttributeNames() { */ 
public static final String TRANSPORT_ACTION = "action"; + /** + * Task id + */ + public static final String TASK_ID = "task_id"; + + /** + * Parent task id + */ + public static final String PARENT_TASK_ID = "parent_task_id"; + /** * Index Name */ @@ -99,4 +109,9 @@ private AttributeNames() { * Refresh Policy */ public static final String REFRESH_POLICY = "refresh_policy"; + + /** + * Search Response Total Hits + */ + public static final String TOTAL_HITS = "total_hits"; } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java index 70658c5d71bf3..42e64109b72fd 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java @@ -15,6 +15,7 @@ import org.opensearch.core.common.Strings; import org.opensearch.http.HttpRequest; import org.opensearch.rest.RestRequest; +import org.opensearch.tasks.Task; import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.transport.TcpChannel; import org.opensearch.transport.Transport; @@ -196,4 +197,43 @@ private static Attributes buildSpanAttributes(String nodeId, ReplicatedWriteRequ return attributes; } + /** + * Creates {@link SpanCreationContext} with parent set to specified SpanContext. + * @param spanName name of span. + * @param parentSpan target parent span. + * @return context + */ + public static SpanCreationContext from(String spanName, SpanContext parentSpan) { + return SpanCreationContext.server().name(spanName).parent(parentSpan); + } + + /** + * Creates {@link SpanCreationContext} with parent set to specified SpanContext. + * @param task search task. + * @param actionName action. + * @return context + */ + public static SpanCreationContext from(Task task, String actionName) { + return SpanCreationContext.server().name(createSpanName(task, actionName)).attributes(buildSpanAttributes(task, actionName)); + } + + private static Attributes buildSpanAttributes(Task task, String actionName) { + Attributes attributes = Attributes.create().addAttribute(AttributeNames.TRANSPORT_ACTION, actionName); + if (task != null) { + attributes.addAttribute(AttributeNames.TASK_ID, task.getId()); + if (task.getParentTaskId() != null && task.getParentTaskId().isSet()) { + attributes.addAttribute(AttributeNames.PARENT_TASK_ID, task.getParentTaskId().getId()); + } + } + return attributes; + + } + + private static String createSpanName(Task task, String actionName) { + if (task != null) { + return task.getType() + SEPARATOR + task.getAction(); + } else { + return actionName; + } + } } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableSearchRequestOperationsListener.java b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableSearchRequestOperationsListener.java new file mode 100644 index 0000000000000..71fb59194c447 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableSearchRequestOperationsListener.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing.listener; + +import org.opensearch.action.search.SearchPhaseContext; +import org.opensearch.action.search.SearchRequestContext; +import org.opensearch.action.search.SearchRequestOperationsListener; +import org.opensearch.telemetry.tracing.AttributeNames; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanContext; +import org.opensearch.telemetry.tracing.Tracer; + +/** + * SearchRequestOperationsListener subscriber for search request tracing + * + * @opensearch.internal + */ +public final class TraceableSearchRequestOperationsListener extends SearchRequestOperationsListener { + private final Tracer tracer; + private final Span requestSpan; + private SpanContext phaseSpanContext; + + public TraceableSearchRequestOperationsListener(final Tracer tracer, final Span requestSpan) { + this.tracer = tracer; + this.requestSpan = requestSpan; + this.phaseSpanContext = null; + } + + public static SearchRequestOperationsListener create(final Tracer tracer, final Span requestSpan) { + if (tracer.isRecording()) { + return new TraceableSearchRequestOperationsListener(tracer, requestSpan); + } else { + return SearchRequestOperationsListener.NOOP; + } + } + + @Override + protected void onPhaseStart(SearchPhaseContext context) { + assert phaseSpanContext == null : "There should be only one search phase active at a time"; + phaseSpanContext = tracer.getCurrentSpan(); + } + + @Override + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + assert phaseSpanContext != null : "There should be a search phase active at that time"; + phaseSpanContext.endSpan(); + phaseSpanContext = null; + } + + @Override + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { + assert phaseSpanContext != null : "There should be a search phase active at that time"; + phaseSpanContext.setError((Exception) cause); + phaseSpanContext.endSpan(); + phaseSpanContext = null; + } + + @Override + public void onRequestStart(SearchRequestContext searchRequestContext) {} + + @Override + public void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + // add response-related attributes on request end + requestSpan.addAttribute( + AttributeNames.TOTAL_HITS, + searchRequestContext.totalHits() == null ? 
0 : searchRequestContext.totalHits().value + ); + } +} diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index 3af7af114e96d..420289d3ff2e5 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -58,6 +58,7 @@ import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; @@ -85,19 +86,16 @@ import java.util.function.BiFunction; import java.util.stream.IntStream; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; -import static org.mockito.Mockito.mock; public class AbstractSearchAsyncActionTests extends OpenSearchTestCase { private final List> resolvedNodes = new ArrayList<>(); private final Set releasedContexts = new CopyOnWriteArraySet<>(); private ExecutorService executor; - private SearchRequestOperationsListener assertingListener; + private SearchRequestOperationsListenerAssertingListener assertingListener; ThreadPool threadPool; @Before @@ -106,27 +104,7 @@ public void setUp() throws Exception { super.setUp(); executor = Executors.newFixedThreadPool(1); threadPool = new TestThreadPool(getClass().getName()); - assertingListener = new SearchRequestOperationsListener() { - private volatile SearchPhase phase; - - @Override - protected void onPhaseStart(SearchPhaseContext context) { - assertThat(phase, is(nullValue())); - phase = context.getCurrentPhase(); - } - - @Override - protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { - assertThat(phase, is(context.getCurrentPhase())); - phase = null; - } - - @Override - protected void onPhaseFailure(SearchPhaseContext context) { - assertThat(phase, is(context.getCurrentPhase())); - phase = null; - } - }; + assertingListener = new SearchRequestOperationsListenerAssertingListener(); } @After @@ -136,6 +114,7 @@ public void tearDown() throws Exception { executor.shutdown(); assertTrue(executor.awaitTermination(1, TimeUnit.SECONDS)); ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + assertingListener.assertFinished(); } private AbstractSearchAsyncAction createAction( @@ -207,7 +186,8 @@ private AbstractSearchAsyncAction createAction( new SearchRequestContext( new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), request - ) + ), + NoopTracer.INSTANCE ) { @Override protected SearchPhase getNextPhase(final SearchPhaseResults results, SearchPhaseContext context) { @@ -371,7 +351,7 @@ public void testOnPhaseFailureAndVerifyListeners() { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); SearchRequestStats testListener = new SearchRequestStats(clusterSettings); - final List requestOperationListeners = List.of(testListener); + final List requestOperationListeners = List.of(testListener, assertingListener); 
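// Note: SearchRequestOperationsListenerAssertingListener is a new test helper created by this
// patch (see the new file in the diffstat); its body is not shown in this hunk. Judging from
// the inline listener it replaces (removed from setUp() above), a minimal sketch would track
// the active phase and verify that start/end/failure callbacks are correctly paired:
class SearchRequestOperationsListenerAssertingListener extends SearchRequestOperationsListener {
    private volatile SearchPhase phase;

    @Override
    protected void onPhaseStart(SearchPhaseContext context) {
        assertThat(phase, is(nullValue())); // only one phase may be active at a time
        phase = context.getCurrentPhase();
    }

    @Override
    protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {
        assertThat(phase, is(context.getCurrentPhase())); // must end the phase that was started
        phase = null;
    }

    @Override
    protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) {
        assertThat(phase, is(context.getCurrentPhase()));
        phase = null;
    }

    void assertFinished() {
        assertThat(phase, is(nullValue())); // called from tearDown(): no phase may be left open
    }
}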
SearchQueryThenFetchAsyncAction action = createSearchQueryThenFetchAsyncAction(requestOperationListeners); action.start(); assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseName())); @@ -403,6 +383,7 @@ public void run() { SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, Collections.emptyList(), OriginalIndices.NONE); searchShardIterator.resetAndSkip(); action.skipShard(searchShardIterator); + action.start(); action.executeNextPhase(action, fetchPhase); assertEquals(1, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); action.onPhaseFailure(new SearchPhase("test") { @@ -645,7 +626,7 @@ public void testExecutePhaseOnShardFailureAndThrowException() throws Interrupted public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedException { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); SearchRequestStats testListener = new SearchRequestStats(clusterSettings); - final List requestOperationListeners = new ArrayList<>(List.of(testListener)); + final List requestOperationListeners = new ArrayList<>(List.of(testListener, assertingListener)); long delay = (randomIntBetween(1, 5)); delay = delay * 10; @@ -685,8 +666,8 @@ public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedEx assertEquals(1, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); action.executeNextPhase(expandPhase, fetchPhase); + action.onPhaseDone(); /* finish the phase since we don't have a response being sent */ - action.sendSearchResponse(mock(InternalSearchResponse.class), mock(String.valueOf(QuerySearchResult.class))); assertThat(testListener.getPhaseMetric(expandPhase.getSearchPhaseName()), greaterThanOrEqualTo(delay)); assertEquals(1, testListener.getPhaseTotal(expandPhase.getSearchPhaseName())); assertEquals(0, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); @@ -695,7 +676,7 @@ public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedEx public void testOnPhaseListenersWithDfsType() throws InterruptedException { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); SearchRequestStats testListener = new SearchRequestStats(clusterSettings); - final List requestOperationListeners = new ArrayList<>(List.of(testListener)); + final List requestOperationListeners = new ArrayList<>(List.of(testListener, assertingListener)); SearchDfsQueryThenFetchAsyncAction searchDfsQueryThenFetchAsyncAction = createSearchDfsQueryThenFetchAsyncAction( requestOperationListeners ); @@ -712,6 +693,11 @@ public void testOnPhaseListenersWithDfsType() throws InterruptedException { searchDfsQueryThenFetchAsyncAction.skipShard(searchShardIterator); searchDfsQueryThenFetchAsyncAction.executeNextPhase(searchDfsQueryThenFetchAsyncAction, fetchPhase); + searchDfsQueryThenFetchAsyncAction.onPhaseFailure( + fetchPhase, + "Something went wrong", + null + ); /* finalizing the fetch phase since we make ad hoc phase lifecycle calls */ assertThat(testListener.getPhaseMetric(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName()), greaterThanOrEqualTo(delay)); assertEquals(1, testListener.getPhaseTotal(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); @@ -754,7 +740,7 @@ private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAct null, null, null, - null, + controller, executor, resultConsumer, searchRequest, @@ -767,7 +753,8 @@ private SearchDfsQueryThenFetchAsyncAction 
createSearchDfsQueryThenFetchAsyncAct new SearchRequestContext( new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger), searchRequest - ) + ), + NoopTracer.INSTANCE ); } @@ -820,7 +807,8 @@ private SearchQueryThenFetchAsyncAction createSearchQueryThenFetchAsyncAction( new SearchRequestContext( new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger), searchRequest - ) + ), + NoopTracer.INSTANCE ) { @Override ShardSearchFailure[] buildShardFailures() { diff --git a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 30fc50f91dabd..1881c705fe6b3 100644 --- a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -55,6 +55,7 @@ import org.opensearch.search.sort.MinAndMax; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; @@ -66,7 +67,6 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashSet; -import java.util.IdentityHashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -75,7 +75,6 @@ import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; import java.util.stream.IntStream; @@ -83,42 +82,22 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.collection.IsEmptyCollection.empty; public class CanMatchPreFilterSearchPhaseTests extends OpenSearchTestCase { - private SearchRequestOperationsListener assertingListener; - private Set phases; + private SearchRequestOperationsListenerAssertingListener assertingListener; @Before public void setUp() throws Exception { super.setUp(); - phases = Collections.newSetFromMap(new IdentityHashMap<>()); - assertingListener = new SearchRequestOperationsListener() { - @Override - protected void onPhaseStart(SearchPhaseContext context) { - assertThat(phases.contains(context.getCurrentPhase()), is(false)); - phases.add(context.getCurrentPhase()); - } - - @Override - protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { - assertThat(phases.contains(context.getCurrentPhase()), is(true)); - phases.remove(context.getCurrentPhase()); - } - - @Override - protected void onPhaseFailure(SearchPhaseContext context) { - assertThat(phases.contains(context.getCurrentPhase()), is(true)); - phases.remove(context.getCurrentPhase()); - } - }; + assertingListener = new SearchRequestOperationsListenerAssertingListener(); } @After public void tearDown() throws Exception { super.tearDown(); - assertBusy(() -> assertThat(phases, empty()), 5, TimeUnit.SECONDS); + + assertingListener.assertFinished(); } public void testFilterShards() throws InterruptedException { @@ -164,6 +143,10 @@ public void sendCanMatch( final SearchRequest searchRequest = new SearchRequest(); 
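// Note: in the rewritten tests below, the phase-end notification is issued before
// latch.countDown() (previously the inline listener was notified after the countdown).
// A plausible reason, not stated in the patch: the latch releases the test thread, whose
// tearDown() now calls assertingListener.assertFinished(), so the phase must be marked
// ended first. A sketch of the corrected ordering used in each test:
@Override
public void run() throws IOException {
    result.set(iter);
    searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); // end the phase first
    latch.countDown(); // then unblock the asserting test thread
}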
searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -182,15 +165,13 @@ public void sendCanMatch( @Override public void run() throws IOException { result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); - assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), - searchRequest - ) + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -260,6 +241,10 @@ public void sendCanMatch( final SearchRequest searchRequest = new SearchRequest(); searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -278,15 +263,13 @@ public void sendCanMatch( @Override public void run() throws IOException { result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); - assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), - searchRequest - ) + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -346,6 +329,10 @@ public void sendCanMatch( (e) -> { throw new AssertionError("unexpected", e); } ); Map aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); final CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -360,58 +347,57 @@ public void sendCanMatch( timeProvider, ClusterState.EMPTY_STATE, null, - (iter) -> new AbstractSearchAsyncAction("test", logger, transportService, (cluster, node) -> { - assert cluster == null : "cluster was not null: " + cluster; - return lookup.get(node); - }, - aliasFilters, - Collections.emptyMap(), - Collections.emptyMap(), - executor, - searchRequest, - responseListener, - iter, - new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), - ClusterState.EMPTY_STATE, - null, - new ArraySearchPhaseResults<>(iter.size()), - randomIntBetween(1, 32), - SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), - searchRequest - ) - ) { - - @Override - protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { - return new SearchPhase("test") { + (iter) -> { + return new 
WrappingSearchAsyncActionPhase( + new AbstractSearchAsyncAction("test", logger, transportService, (cluster, node) -> { + assert cluster == null : "cluster was not null: " + cluster; + return lookup.get(node); + }, + aliasFilters, + Collections.emptyMap(), + Collections.emptyMap(), + executor, + searchRequest, + responseListener, + iter, + new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), + ClusterState.EMPTY_STATE, + null, + new ArraySearchPhaseResults<>(iter.size()), + randomIntBetween(1, 32), + SearchResponse.Clusters.EMPTY, + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE + ) { @Override - public void run() { - latch.countDown(); + protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { + return new WrappingSearchAsyncActionPhase(this) { + @Override + public void run() { + latch.countDown(); + } + }; } - }; - } - @Override - protected void executePhaseOnShard( - final SearchShardIterator shardIt, - final SearchShardTarget shard, - final SearchActionListener listener - ) { - if (randomBoolean()) { - listener.onResponse(new SearchPhaseResult() { - }); - } else { - listener.onFailure(new Exception("failure")); + @Override + protected void executePhaseOnShard( + final SearchShardIterator shardIt, + final SearchShardTarget shard, + final SearchActionListener listener + ) { + if (randomBoolean()) { + listener.onResponse(new SearchPhaseResult() { + }); + } else { + listener.onFailure(new Exception("failure")); + } + } } - } + ); }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), - searchRequest - ) + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -475,6 +461,10 @@ public void sendCanMatch( searchRequest.source(new SearchSourceBuilder().sort(SortBuilders.fieldSort("timestamp").order(order))); searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -493,15 +483,13 @@ public void sendCanMatch( @Override public void run() { result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); - assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), - searchRequest - ) + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -580,6 +568,10 @@ public void sendCanMatch( searchRequest.source(new SearchSourceBuilder().sort(SortBuilders.fieldSort("timestamp").order(order))); searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -598,15 +590,13 @@ public void sendCanMatch( @Override public void run() { 
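+ // Finish the phase via the composite listener before releasing the latch, so tearDown's assertFinished() sees no phase left open.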
result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); - assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), - searchRequest - ) + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -730,7 +720,8 @@ public void run() { }; }, SearchResponse.Clusters.EMPTY, - searchRequestContext + searchRequestContext, + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -779,7 +770,8 @@ private static final class SearchDfsQueryAsyncAction extends AbstractSearchAsync new ArraySearchPhaseResults<>(shardsIts.size()), request.getMaxConcurrentShardRequests(), clusters, - searchRequestContext + searchRequestContext, + NoopTracer.INSTANCE ); this.listener = searchRequestContext.getSearchRequestOperationsListener(); } diff --git a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java index af7adc4e58fb8..35e90ff662b19 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java @@ -51,11 +51,14 @@ import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.ArrayList; @@ -79,6 +82,23 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class SearchAsyncActionTests extends OpenSearchTestCase { + private SearchRequestOperationsListenerAssertingListener assertingListener; + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + + assertingListener = new SearchRequestOperationsListenerAssertingListener(); + } + + @After + @Override + public void tearDown() throws Exception { + super.tearDown(); + + assertingListener.assertFinished(); + } public void testSkipSearchShards() throws InterruptedException { SearchRequest request = new SearchRequest(); @@ -117,6 +137,10 @@ public void testSkipSearchShards() throws InterruptedException { lookup.put(replicaNode.getId(), new MockConnection(replicaNode)); Map aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)); AtomicInteger numRequests = new AtomicInteger(0); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction( "test", logger, @@ -138,7 +162,8 @@ public void testSkipSearchShards() throws InterruptedException { new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new 
SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext(searchRequestOperationsListener, request), + NoopTracer.INSTANCE ) { @Override @@ -169,6 +194,7 @@ protected SearchPhase getNextPhase(SearchPhaseResults res @Override public void run() { assertTrue(searchPhaseDidRun.compareAndSet(false, true)); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null); } }; } @@ -236,6 +262,10 @@ public void testLimitConcurrentShardRequests() throws InterruptedException { Map aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)); CountDownLatch awaitInitialRequests = new CountDownLatch(1); AtomicInteger numRequests = new AtomicInteger(0); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction( "test", logger, @@ -257,7 +287,8 @@ public void testLimitConcurrentShardRequests() throws InterruptedException { new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext(searchRequestOperationsListener, request), + NoopTracer.INSTANCE ) { @Override @@ -295,6 +326,7 @@ protected SearchPhase getNextPhase(SearchPhaseResults res return new SearchPhase("test") { @Override public void run() { + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null); assertTrue(searchPhaseDidRun.compareAndSet(false, true)); } }; @@ -375,7 +407,11 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), + request + ), + NoopTracer.INSTANCE ) { TestSearchResponse response = new TestSearchResponse(); @@ -411,6 +447,7 @@ public void run() { sendReleaseSearchContext(result.getContextId(), new MockConnection(result.node), OriginalIndices.NONE); } responseListener.onResponse(response); + assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null); } }; } @@ -498,7 +535,11 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), + request + ), + NoopTracer.INSTANCE ) { TestSearchResponse response = new TestSearchResponse(); @@ -591,6 +632,10 @@ public void testAllowPartialResults() throws InterruptedException { Map aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)); AtomicInteger numRequests = new AtomicInteger(0); AtomicInteger numFailReplicas = new AtomicInteger(0); + final 
SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction( "test", logger, @@ -612,7 +657,8 @@ public void testAllowPartialResults() throws InterruptedException { new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext(searchRequestOperationsListener, request), + NoopTracer.INSTANCE ) { @Override protected void executePhaseOnShard( @@ -645,6 +691,7 @@ protected SearchPhase getNextPhase(SearchPhaseResults res @Override public void run() { assertTrue(searchPhaseDidRun.compareAndSet(false, true)); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null); } }; } diff --git a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java index faf6f86c69c27..aefbbe80d5fa1 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java @@ -59,9 +59,12 @@ import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; import org.opensearch.search.sort.SortBuilders; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; +import org.junit.After; +import org.junit.Before; import java.util.Collections; import java.util.List; @@ -77,6 +80,24 @@ import static org.hamcrest.Matchers.instanceOf; public class SearchQueryThenFetchAsyncActionTests extends OpenSearchTestCase { + private SearchRequestOperationsListenerAssertingListener assertingListener; + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + + assertingListener = new SearchRequestOperationsListenerAssertingListener(); + } + + @After + @Override + public void tearDown() throws Exception { + super.tearDown(); + + assertingListener.assertFinished(); + } + public void testBottomFieldSort() throws Exception { testCase(false, false); } @@ -218,15 +239,17 @@ public void sendExecuteQuery( task, SearchResponse.Clusters.EMPTY, new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), searchRequest - ) + ), + NoopTracer.INSTANCE ) { @Override protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { return new SearchPhase("test") { @Override public void run() { + assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); } }; diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java index 1cb336e18b12c..845543fbd9f57 100644 --- 
a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java @@ -125,7 +125,7 @@ protected void onPhaseStart(SearchPhaseContext context) {} protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} @Override - protected void onPhaseFailure(SearchPhaseContext context) {} + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) {} }; } } diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerAssertingListener.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerAssertingListener.java new file mode 100644 index 0000000000000..327371ebcaf0b --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerAssertingListener.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; + +class SearchRequestOperationsListenerAssertingListener extends SearchRequestOperationsListener { + private volatile SearchPhase phase; + + @Override + protected void onPhaseStart(SearchPhaseContext context) { + assertThat(phase, is(nullValue())); + phase = context.getCurrentPhase(); + } + + @Override + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + assertThat(phase, is(context.getCurrentPhase())); + phase = null; + } + + @Override + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { + assertThat(phase, is(context.getCurrentPhase())); + phase = null; + } + + public void assertFinished() { + assertThat(phase, is(nullValue())); + } +} diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java index a53c35a8401b3..990ed95f1aebc 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java @@ -40,7 +40,7 @@ public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRe } @Override - public void onPhaseFailure(SearchPhaseContext context) { + public void onPhaseFailure(SearchPhaseContext context, Throwable cause) { searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); } }; diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java index 377ccebbfd418..fb9b26e3f3ad1 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java @@ -36,7 +36,7 @@ public void testSearchRequestPhaseFailure() { when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); testRequestStats.onPhaseStart(ctx); assertEquals(1, testRequestStats.getPhaseCurrent(searchPhaseName)); - testRequestStats.onPhaseFailure(ctx); + 
testRequestStats.onPhaseFailure(ctx, new Throwable()); assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); } } @@ -156,7 +156,7 @@ public void testSearchRequestStatsOnPhaseFailureConcurrently() throws Interrupte threads[i] = new Thread(() -> { phaser.arriveAndAwaitAdvance(); testRequestStats.onPhaseStart(ctx); - testRequestStats.onPhaseFailure(ctx); + testRequestStats.onPhaseFailure(ctx, new Throwable()); countDownLatch.countDown(); }); threads[i].start(); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 7c50e961853b5..4b43b5f9c3ff5 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -2310,7 +2310,8 @@ public void onFailure(final Exception e) { client ), NoopMetricsRegistry.INSTANCE, - searchRequestOperationsCompositeListenerFactory + searchRequestOperationsCompositeListenerFactory, + NoopTracer.INSTANCE ) ); actions.put( diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java index 75fc6761a60ef..4b763e4bd4454 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java @@ -21,6 +21,7 @@ import org.opensearch.http.HttpResponse; import org.opensearch.rest.RestRequest; import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.telemetry.tracing.noop.NoopSpan; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportException; @@ -104,6 +105,16 @@ public void testTransportContext() { assertEquals(connection.getNode().getHostAddress(), attributes.getAttributesMap().get(AttributeNames.TRANSPORT_TARGET_HOST)); } + public void testParentSpan() { + String spanName = "test-name"; + SpanContext parentSpanContext = new SpanContext(NoopSpan.INSTANCE); + SpanCreationContext context = SpanBuilder.from(spanName, parentSpanContext); + Attributes attributes = context.getAttributes(); + assertNull(attributes); + assertEquals(spanName, context.getSpanName()); + assertEquals(parentSpanContext, context.getParent()); + } + private static Transport.Connection createTransportConnection() { return new Transport.Connection() { @Override