From 91f9c5dbf417e179c5ab00e49332c17c84661008 Mon Sep 17 00:00:00 2001 From: Ashish Date: Thu, 28 Sep 2023 08:35:04 +0530 Subject: [PATCH 01/14] Update recovery target access time during segment sync for remote index (#10252) * Update recovery target access time during segment sync for remote indexes Signed-off-by: Ashish Singh * Add Integration Test Signed-off-by: Ashish Singh --------- Signed-off-by: Ashish Singh --- .../opensearch/remotestore/RemoteStoreIT.java | 12 ++++++ .../opensearch/index/shard/IndexShard.java | 37 +++++++++++++------ .../opensearch/index/shard/StoreRecovery.java | 1 - .../recovery/PeerRecoveryTargetService.java | 2 +- 4 files changed, 38 insertions(+), 14 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 0aa3a06a108f1..1fb5c2052aded 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -8,6 +8,7 @@ package org.opensearch.remotestore; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexRequest; @@ -26,6 +27,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.translog.Translog.Durability; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; @@ -120,6 +122,16 @@ public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogFlush() throws Excep testPeerRecovery(randomIntBetween(2, 5), true); } + public void testPeerRecoveryWithLowActivityTimeout() throws Exception { + ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings( + Settings.builder() + .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "20kb") + .put(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(), "1s") + ); + internalCluster().client().admin().cluster().updateSettings(req).get(); + testPeerRecovery(randomIntBetween(2, 5), true); + } + public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogNoDataRefresh() throws Exception { testPeerRecovery(1, false); } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index bbb7d3cf5f30f..d476e8b7c9288 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -4726,11 +4726,21 @@ public void syncTranslogFilesFromRemoteTranslog() throws IOException { } /** - * Downloads segments from remote segment store. - * @param overrideLocal flag to override local segment files with those in remote store - * @throws IOException if exception occurs while reading segments from remote store + * Downloads segments from remote segment store + * @param overrideLocal flag to override local segment files with those in remote store. + * @throws IOException if exception occurs while reading segments from remote store. 
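+     * @see #syncSegmentsFromRemoteSegmentStore(boolean, Runnable)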
 */
     public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal) throws IOException {
+        syncSegmentsFromRemoteSegmentStore(overrideLocal, () -> {});
+    }
+
+    /**
+     * Downloads segments from remote segment store along with updating the access time of the recovery target.
+     * @param overrideLocal flag to override local segment files with those in remote store.
+     * @param onFileSync runnable that updates the access time when run.
+     * @throws IOException if exception occurs while reading segments from remote store.
+     */
+    public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, final Runnable onFileSync) throws IOException {
         assert indexSettings.isRemoteStoreEnabled();
         logger.trace("Downloading segments from remote segment store");
         RemoteSegmentStoreDirectory remoteDirectory = getRemoteDirectory();
@@ -4761,7 +4771,7 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal) throws IOE
         } else {
             storeDirectory = store.directory();
         }
-        copySegmentFiles(storeDirectory, remoteDirectory, null, uploadedSegments, overrideLocal);
+        copySegmentFiles(storeDirectory, remoteDirectory, null, uploadedSegments, overrideLocal, onFileSync);
 
         if (remoteSegmentMetadata != null) {
             final SegmentInfos infosSnapshot = store.buildSegmentInfos(
@@ -4821,7 +4831,8 @@ public void syncSegmentsFromGivenRemoteSegmentStore(
                 sourceRemoteDirectory,
                 remoteDirectory,
                 uploadedSegments,
-                overrideLocal
+                overrideLocal,
+                () -> {}
             );
             if (segmentsNFile != null) {
                 try (
@@ -4854,7 +4865,8 @@ private String copySegmentFiles(
         RemoteSegmentStoreDirectory sourceRemoteDirectory,
         RemoteSegmentStoreDirectory targetRemoteDirectory,
         Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments,
-        boolean overrideLocal
+        boolean overrideLocal,
+        final Runnable onFileSync
     ) throws IOException {
         Set<String> toDownloadSegments = new HashSet<>();
         Set<String> skippedSegments = new HashSet<>();
@@ -4883,9 +4895,7 @@ private String copySegmentFiles(
 
         if (toDownloadSegments.isEmpty() == false) {
             try {
-                final PlainActionFuture<Void> completionListener = PlainActionFuture.newFuture();
-                downloadSegments(storeDirectory, sourceRemoteDirectory, targetRemoteDirectory, toDownloadSegments, completionListener);
-                completionListener.actionGet();
+                downloadSegments(storeDirectory, sourceRemoteDirectory, targetRemoteDirectory, toDownloadSegments, onFileSync);
             } catch (Exception e) {
                 throw new IOException("Error occurred when downloading segments from remote store", e);
             }
@@ -4903,22 +4913,25 @@ private void downloadSegments(
         RemoteSegmentStoreDirectory sourceRemoteDirectory,
         RemoteSegmentStoreDirectory targetRemoteDirectory,
         Set<String> toDownloadSegments,
-        ActionListener<Void> completionListener
+        final Runnable onFileSync
     ) {
-        final Path indexPath = store.shardPath() == null ? null : store.shardPath().resolveIndex();
+        final PlainActionFuture<Void> completionListener = PlainActionFuture.newFuture();
         final GroupedActionListener<Void> batchDownloadListener = new GroupedActionListener<>(
             ActionListener.map(completionListener, v -> null),
             toDownloadSegments.size()
         );
 
         final ActionListener<String> segmentsDownloadListener = ActionListener.map(batchDownloadListener, fileName -> {
+            onFileSync.run();
             if (targetRemoteDirectory != null) {
                 targetRemoteDirectory.copyFrom(storeDirectory, fileName, fileName, IOContext.DEFAULT);
             }
             return null;
         });
-        toDownloadSegments.forEach(file -> sourceRemoteDirectory.copyTo(file, storeDirectory, indexPath, segmentsDownloadListener));
+        final Path indexPath = store.shardPath() == null ? null : store.shardPath().resolveIndex();
+        toDownloadSegments.forEach(file -> { sourceRemoteDirectory.copyTo(file, storeDirectory, indexPath, segmentsDownloadListener); });
+        completionListener.actionGet();
     }
 
     private boolean localDirectoryContains(Directory localDirectory, String file, long checksum) {
diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java
index f429d94f7f96c..c0211e1257c8e 100644
--- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java
+++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java
@@ -536,7 +536,6 @@ private void recoverFromRemoteStore(IndexShard indexShard) throws IndexShardReco
         try {
             // Download segments from remote segment store
             indexShard.syncSegmentsFromRemoteSegmentStore(true);
-
             indexShard.syncTranslogFilesFromRemoteTranslog();
 
             // On index creation, the only segment file that is created is segments_N. We can safely discard this file
diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java
index 1ea32f4e355e3..65c3e976d17fe 100644
--- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java
+++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java
@@ -245,7 +245,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi
         indexShard.prepareForIndexRecovery();
         final boolean hasRemoteSegmentStore = indexShard.indexSettings().isRemoteStoreEnabled();
         if (hasRemoteSegmentStore) {
-            indexShard.syncSegmentsFromRemoteSegmentStore(false);
+            indexShard.syncSegmentsFromRemoteSegmentStore(false, recoveryTarget::setLastAccessTime);
         }
         final boolean hasRemoteTranslog = recoveryTarget.state().getPrimary() == false && indexShard.isRemoteTranslogEnabled();
         final boolean hasNoTranslog = indexShard.indexSettings().isRemoteSnapshot();

From d656e3db592f29466d35452867caa241f5429485 Mon Sep 17 00:00:00 2001
From: Rohit Ashiwal
Date: Thu, 28 Sep 2023 18:33:41 +0530
Subject: [PATCH 02/14] Indexing: add Doc Status Counter (#8716)

Currently, OpenSearch returns a 200 OK response code for a Bulk API call even
though there can be partial or complete failures within the request end to end.
This change adds doc-level stats bucketed by REST status code family (2xx, 4xx,
5xx, etc.).
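For illustration, a minimal usage sketch of the counter introduced below
(IndexingStats.Stats.DocStatusStats): buckets are indexed by status family,
so index 0 holds 1xx counts and index 4 holds 5xx counts.

    DocStatusStats stats = new DocStatusStats();
    stats.inc(RestStatus.CREATED);  // 201 -> family 2 -> bucket index 1
    stats.inc(RestStatus.CONFLICT); // 409 -> family 4 -> bucket index 3
    assert stats.getDocStatusCounter()[1].longValue() == 1; // 2xx
    assert stats.getDocStatusCounter()[3].longValue() == 1; // 4xx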
Signed-off-by: Rohit Ashiwal --- CHANGELOG.md | 1 + .../org/opensearch/core/rest/RestStatus.java | 9 + .../test/nodes.stats/11_indices_metrics.yml | 29 ++ .../org/opensearch/nodestats/NodeStatsIT.java | 276 ++++++++++++++++++ .../action/bulk/TransportBulkAction.java | 47 ++- .../action/update/TransportUpdateAction.java | 25 +- .../opensearch/index/shard/IndexingStats.java | 123 +++++++- .../index/shard/InternalIndexingStats.java | 3 +- .../opensearch/indices/IndicesService.java | 10 + ...ActionIndicesThatCannotBeCreatedTests.java | 1 + .../bulk/TransportBulkActionIngestTests.java | 1 + .../action/bulk/TransportBulkActionTests.java | 3 + .../bulk/TransportBulkActionTookTests.java | 1 + .../org/opensearch/core/RestStatusTests.java | 91 ++++++ .../index/shard/IndexingStatsTests.java | 141 +++++++++ .../snapshots/SnapshotResiliencyTests.java | 1 + 16 files changed, 744 insertions(+), 18 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java create mode 100644 server/src/test/java/org/opensearch/core/RestStatusTests.java create mode 100644 server/src/test/java/org/opensearch/index/shard/IndexingStatsTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 780db204aa1aa..b0b3ce261d517 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -87,6 +87,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666)) - Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) - Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) +- Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) ### Dependencies - Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) diff --git a/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java b/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java index 313bc23bedc90..8441ce8b1b622 100644 --- a/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java +++ b/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java @@ -527,6 +527,15 @@ public int getStatus() { return status; } + /** + * Get category class of a rest status code. 
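+     * For example, {@code NOT_FOUND} (404) belongs to family {@code 4} and {@code OK} (200) to family {@code 2}.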
+ * + * @return Integer representing class category of the concrete rest status code + */ + public int getStatusFamilyCode() { + return status / 100; + } + public static RestStatus readFrom(StreamInput in) throws IOException { return RestStatus.valueOf(in.readString()); } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index 1f1f42890355e..3f79227ce64e8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -138,6 +138,35 @@ - is_false: nodes.$node_id.indices.translog - is_false: nodes.$node_id.indices.recovery +--- +"Metric - indexing doc_status": + - skip: + version: " - 2.99.99" + reason: "To be introduced in future release :: TODO: change if/when we backport to 2.x" + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id + + - do: + nodes.stats: { metric: indices, index_metric: indexing } + + - is_false: nodes.$node_id.indices.docs + - is_false: nodes.$node_id.indices.store + - is_true: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.indexing.doc_status + - is_false: nodes.$node_id.indices.get + - is_false: nodes.$node_id.indices.search + - is_false: nodes.$node_id.indices.merges + - is_false: nodes.$node_id.indices.refresh + - is_false: nodes.$node_id.indices.flush + - is_false: nodes.$node_id.indices.warmer + - is_false: nodes.$node_id.indices.query_cache + - is_false: nodes.$node_id.indices.fielddata + - is_false: nodes.$node_id.indices.completion + - is_false: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.indices.translog + - is_false: nodes.$node_id.indices.recovery --- "Metric - recovery": diff --git a/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java new file mode 100644 index 0000000000000..f270cb1399072 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java @@ -0,0 +1,276 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.nodestats; + +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.bulk.BulkItemResponse; +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.action.delete.DeleteResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.update.UpdateRequest; +import org.opensearch.action.update.UpdateResponse; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.index.engine.DocumentMissingException; +import org.opensearch.index.engine.VersionConflictEngineException; +import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.hamcrest.MatcherAssert; + +import java.util.Arrays; +import java.util.Comparator; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +@ClusterScope(scope = Scope.TEST, numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) +public class NodeStatsIT extends OpenSearchIntegTestCase { + + private final DocStatusStats expectedDocStatusStats = new DocStatusStats(); + private static final String FIELD = "dummy_field"; + private static final String VALUE = "dummy_value"; + private static final Map SOURCE = singletonMap(FIELD, VALUE); + + public void testNodeIndicesStatsDocStatusStatsIndexBulk() { + { // Testing Index + final String INDEX = "test_index"; + final String ID = "id"; + { // Testing Normal Index + IndexResponse response = client().index(new IndexRequest(INDEX).id(ID).source(SOURCE)).actionGet(); + updateExpectedDocStatusCounter(response); + + MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertDocStatusStats(); + } + { // Testing Missing Alias + updateExpectedDocStatusCounter( + expectThrows( + IndexNotFoundException.class, + () -> client().index(new IndexRequest(INDEX).id("missing_alias").setRequireAlias(true).source(SOURCE)).actionGet() + ) + ); + assertDocStatusStats(); + } + { + // Test Missing Pipeline: Ingestion failure, not Indexing failure + expectThrows( + IllegalArgumentException.class, + () -> client().index(new IndexRequest(INDEX).id("missing_pipeline").setPipeline("missing").source(SOURCE)).actionGet() + ); + assertDocStatusStats(); + } + { // Testing Version Conflict + final String docId = "version_conflict"; + + updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet()); + updateExpectedDocStatusCounter( + expectThrows( + VersionConflictEngineException.class, + () -> client().index(new IndexRequest(INDEX).id(docId).source(SOURCE).setIfSeqNo(1L).setIfPrimaryTerm(99L)) + .actionGet() + ) + ); + assertDocStatusStats(); + } + } + { // Testing Bulk + final String INDEX = "bulk_index"; + + int sizeOfIndexRequests = scaledRandomIntBetween(10, 20); + int sizeOfDeleteRequests = scaledRandomIntBetween(5, sizeOfIndexRequests); + int sizeOfNotFoundRequests = scaledRandomIntBetween(5, sizeOfIndexRequests); + + BulkRequest bulkRequest = new BulkRequest(); + + for (int i = 0; i < 
sizeOfIndexRequests; ++i) { + bulkRequest.add(new IndexRequest(INDEX).id(String.valueOf(i)).source(SOURCE)); + } + + BulkResponse response = client().bulk(bulkRequest).actionGet(); + + MatcherAssert.assertThat(response.hasFailures(), equalTo(false)); + MatcherAssert.assertThat(response.getItems().length, equalTo(sizeOfIndexRequests)); + + for (BulkItemResponse itemResponse : response.getItems()) { + updateExpectedDocStatusCounter(itemResponse.getResponse()); + } + + refresh(INDEX); + bulkRequest.requests().clear(); + + for (int i = 0; i < sizeOfDeleteRequests; ++i) { + bulkRequest.add(new DeleteRequest(INDEX, String.valueOf(i))); + } + for (int i = 0; i < sizeOfNotFoundRequests; ++i) { + bulkRequest.add(new DeleteRequest(INDEX, String.valueOf(25 + i))); + } + + response = client().bulk(bulkRequest).actionGet(); + + MatcherAssert.assertThat(response.hasFailures(), equalTo(false)); + MatcherAssert.assertThat(response.getItems().length, equalTo(sizeOfDeleteRequests + sizeOfNotFoundRequests)); + + for (BulkItemResponse itemResponse : response.getItems()) { + updateExpectedDocStatusCounter(itemResponse.getResponse()); + } + + refresh(INDEX); + assertDocStatusStats(); + } + } + + public void testNodeIndicesStatsDocStatusStatsCreateDeleteUpdate() { + { // Testing Create + final String INDEX = "create_index"; + final String ID = "id"; + { // Testing Creation + IndexResponse response = client().index(new IndexRequest(INDEX).id(ID).source(SOURCE).create(true)).actionGet(); + updateExpectedDocStatusCounter(response); + + MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertDocStatusStats(); + } + { // Testing Version Conflict + final String docId = "version_conflict"; + + updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet()); + updateExpectedDocStatusCounter( + expectThrows( + VersionConflictEngineException.class, + () -> client().index(new IndexRequest(INDEX).id(docId).source(SOURCE).create(true)).actionGet() + ) + ); + assertDocStatusStats(); + } + } + { // Testing Delete + final String INDEX = "delete_index"; + final String ID = "id"; + { // Testing Deletion + IndexResponse response = client().index(new IndexRequest(INDEX).id(ID).source(SOURCE)).actionGet(); + updateExpectedDocStatusCounter(response); + + DeleteResponse deleteResponse = client().delete(new DeleteRequest(INDEX, ID)).actionGet(); + updateExpectedDocStatusCounter(deleteResponse); + + MatcherAssert.assertThat(response.getSeqNo(), greaterThanOrEqualTo(0L)); + MatcherAssert.assertThat(deleteResponse.getResult(), equalTo(DocWriteResponse.Result.DELETED)); + assertDocStatusStats(); + } + { // Testing Non-Existing Doc + updateExpectedDocStatusCounter(client().delete(new DeleteRequest(INDEX, "does_not_exist")).actionGet()); + assertDocStatusStats(); + } + { // Testing Version Conflict + final String docId = "version_conflict"; + + updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet()); + updateExpectedDocStatusCounter( + expectThrows( + VersionConflictEngineException.class, + () -> client().delete(new DeleteRequest(INDEX, docId).setIfSeqNo(2L).setIfPrimaryTerm(99L)).actionGet() + ) + ); + + assertDocStatusStats(); + } + } + { // Testing Update + final String INDEX = "update_index"; + final String ID = "id"; + { // Testing Not Found + updateExpectedDocStatusCounter( + expectThrows( + DocumentMissingException.class, + () -> client().update(new UpdateRequest(INDEX, 
ID).doc(SOURCE)).actionGet() + ) + ); + assertDocStatusStats(); + } + { // Testing NoOp Update + updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(ID).source(SOURCE)).actionGet()); + + UpdateResponse response = client().update(new UpdateRequest(INDEX, ID).doc(SOURCE)).actionGet(); + updateExpectedDocStatusCounter(response); + + MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.NOOP)); + assertDocStatusStats(); + } + { // Testing Update + final String UPDATED_VALUE = "updated_value"; + UpdateResponse response = client().update(new UpdateRequest(INDEX, ID).doc(singletonMap(FIELD, UPDATED_VALUE))).actionGet(); + updateExpectedDocStatusCounter(response); + + MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.UPDATED)); + assertDocStatusStats(); + } + { // Testing Missing Alias + updateExpectedDocStatusCounter( + expectThrows( + IndexNotFoundException.class, + () -> client().update(new UpdateRequest(INDEX, ID).setRequireAlias(true).doc(new IndexRequest().source(SOURCE))) + .actionGet() + ) + ); + assertDocStatusStats(); + } + { // Testing Version Conflict + final String docId = "version_conflict"; + + updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet()); + updateExpectedDocStatusCounter( + expectThrows( + VersionConflictEngineException.class, + () -> client().update(new UpdateRequest(INDEX, docId).doc(SOURCE).setIfSeqNo(2L).setIfPrimaryTerm(99L)).actionGet() + ) + ); + assertDocStatusStats(); + } + } + } + + private void assertDocStatusStats() { + DocStatusStats docStatusStats = client().admin() + .cluster() + .prepareNodesStats() + .execute() + .actionGet() + .getNodes() + .get(0) + .getIndices() + .getIndexing() + .getTotal() + .getDocStatusStats(); + + assertTrue( + Arrays.equals( + docStatusStats.getDocStatusCounter(), + expectedDocStatusStats.getDocStatusCounter(), + Comparator.comparingLong(AtomicLong::longValue) + ) + ); + } + + private void updateExpectedDocStatusCounter(DocWriteResponse r) { + expectedDocStatusStats.inc(r.status()); + } + + private void updateExpectedDocStatusCounter(Exception e) { + expectedDocStatusStats.inc(ExceptionsHelper.status(e)); + } + +} diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java index d8b7facc09df2..726ba7ba119af 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java @@ -78,7 +78,9 @@ import org.opensearch.index.IndexingPressureService; import org.opensearch.index.VersionType; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; import org.opensearch.indices.IndexClosedException; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.ingest.IngestService; import org.opensearch.node.NodeClosedException; @@ -129,6 +131,7 @@ public class TransportBulkAction extends HandledTransportAction> entry : requestsByShard.entrySet()) { final ShardId shardId = entry.getKey(); final List requests = entry.getValue(); @@ -632,8 +650,11 @@ public void onResponse(BulkShardResponse bulkShardResponse) { if (bulkItemResponse.getResponse() != null) { bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); } + + docStatusStats.inc(bulkItemResponse.status()); 
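+                    // Per-item status families tallied here are published to IndicesService once the whole bulk completes (see finishHim()).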
responses.set(bulkItemResponse.getItemId(), bulkItemResponse); } + if (counter.decrementAndGet() == 0) { finishHim(); } @@ -644,22 +665,24 @@ public void onFailure(Exception e) { // create failures for all relevant requests for (BulkItemRequest request : requests) { final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); - DocWriteRequest docWriteRequest = request.request(); - responses.set( + final DocWriteRequest docWriteRequest = request.request(); + final BulkItemResponse bulkItemResponse = new BulkItemResponse( request.id(), - new BulkItemResponse( - request.id(), - docWriteRequest.opType(), - new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e) - ) + docWriteRequest.opType(), + new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e) ); + + docStatusStats.inc(bulkItemResponse.status()); + responses.set(request.id(), bulkItemResponse); } + if (counter.decrementAndGet() == 0) { finishHim(); } } private void finishHim() { + indicesService.addDocStatusStats(docStatusStats); listener.onResponse( new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)) ); @@ -766,6 +789,10 @@ void executeBulk( final AtomicArray responses, Map indicesThatCannotBeCreated ) { + /* + * We are not wrapping the listener here to capture the response codes for performance benefits. It will + * be saving us an iteration over the responses array + */ new BulkOperation(task, bulkRequest, listener, responses, startTimeNanos, indicesThatCannotBeCreated).run(); } diff --git a/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java index 95735f71a38e7..819112eb497f6 100644 --- a/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java @@ -32,6 +32,7 @@ package org.opensearch.action.update; +import org.opensearch.ExceptionsHelper; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.action.ActionRunnable; import org.opensearch.action.DocWriteRequest; @@ -62,11 +63,13 @@ import org.opensearch.core.common.io.stream.NotSerializableExceptionWrapper; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaType; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; import org.opensearch.indices.IndicesService; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; @@ -154,10 +157,13 @@ public static void resolveAndValidateRouting(Metadata metadata, String concreteI @Override protected void doExecute(Task task, final UpdateRequest request, final ActionListener listener) { if (request.isRequireAlias() && (clusterService.state().getMetadata().hasAlias(request.index()) == false)) { - throw new IndexNotFoundException( + IndexNotFoundException e = new IndexNotFoundException( "[" + DocWriteRequest.REQUIRE_ALIAS + "] request flag is [true] and [" + request.index() + "] is not an alias", request.index() ); + + incDocStatusStats(e); + throw e; } // if we don't have a master, we don't have metadata, that's fine, let it find a 
cluster-manager using create index API if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) { @@ -193,7 +199,10 @@ public void onFailure(Exception e) { } private void innerExecute(final Task task, final UpdateRequest request, final ActionListener listener) { - super.doExecute(task, request, listener); + super.doExecute(task, request, ActionListener.wrap(listener::onResponse, e -> { + incDocStatusStats(e); + listener.onFailure(e); + })); } @Override @@ -330,7 +339,13 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< shard.noopUpdate(); } } + + DocStatusStats stats = new DocStatusStats(); + stats.inc(RestStatus.OK); + + indicesService.addDocStatusStats(stats); listener.onResponse(update); + break; default: throw new IllegalStateException("Illegal result " + result.getResponseResult()); @@ -361,4 +376,10 @@ private void handleUpdateFailureWithRetry( } listener.onFailure(cause instanceof Exception ? (Exception) cause : new NotSerializableExceptionWrapper(cause)); } + + private void incDocStatusStats(final Exception e) { + DocStatusStats stats = new DocStatusStats(); + stats.inc(ExceptionsHelper.status(e)); + indicesService.addDocStatusStats(stats); + } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java index 8953ef38da51b..f1abea81a6511 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java @@ -37,6 +37,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -44,6 +45,7 @@ import java.io.IOException; import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; /** * Tracks indexing statistics @@ -59,6 +61,89 @@ public class IndexingStats implements Writeable, ToXContentFragment { */ public static class Stats implements Writeable, ToXContentFragment { + /** + * Tracks item level rest category class codes during indexing + * + * @opensearch.internal + */ + public static class DocStatusStats implements Writeable, ToXContentFragment { + + final AtomicLong[] docStatusCounter; + + public DocStatusStats() { + docStatusCounter = new AtomicLong[5]; + for (int i = 0; i < docStatusCounter.length; ++i) { + docStatusCounter[i] = new AtomicLong(0); + } + } + + public DocStatusStats(StreamInput in) throws IOException { + docStatusCounter = in.readArray(i -> new AtomicLong(i.readLong()), AtomicLong[]::new); + + assert docStatusCounter.length == 5 : "Length of incoming array should be 5! 
Got " + docStatusCounter.length; + } + + /** + * Increment counter for status + * + * @param status {@link RestStatus} + */ + public void inc(final RestStatus status) { + add(status, 1L); + } + + /** + * Increment counter for status by count + * + * @param status {@link RestStatus} + * @param delta The value to add + */ + void add(final RestStatus status, final long delta) { + docStatusCounter[status.getStatusFamilyCode() - 1].addAndGet(delta); + } + + /** + * Accumulate stats from the passed Object + * + * @param stats Instance storing {@link DocStatusStats} + */ + public void add(final DocStatusStats stats) { + if (null == stats) { + return; + } + + for (int i = 0; i < docStatusCounter.length; ++i) { + docStatusCounter[i].addAndGet(stats.docStatusCounter[i].longValue()); + } + } + + public AtomicLong[] getDocStatusCounter() { + return docStatusCounter; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.DOC_STATUS); + + for (int i = 0; i < docStatusCounter.length; ++i) { + long value = docStatusCounter[i].longValue(); + + if (value > 0) { + String key = i + 1 + "xx"; + builder.field(key, value); + } + } + + return builder.endObject(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeArray((o, v) -> o.writeLong(v.longValue()), docStatusCounter); + } + + } + private long indexCount; private long indexTimeInMillis; private long indexCurrent; @@ -69,8 +154,11 @@ public static class Stats implements Writeable, ToXContentFragment { private long noopUpdateCount; private long throttleTimeInMillis; private boolean isThrottled; + private final DocStatusStats docStatusStats; - Stats() {} + Stats() { + docStatusStats = new DocStatusStats(); + } public Stats(StreamInput in) throws IOException { indexCount = in.readVLong(); @@ -83,6 +171,12 @@ public Stats(StreamInput in) throws IOException { noopUpdateCount = in.readVLong(); isThrottled = in.readBoolean(); throttleTimeInMillis = in.readLong(); + + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + docStatusStats = in.readOptionalWriteable(DocStatusStats::new); + } else { + docStatusStats = null; + } } public Stats( @@ -95,7 +189,8 @@ public Stats( long deleteCurrent, long noopUpdateCount, boolean isThrottled, - long throttleTimeInMillis + long throttleTimeInMillis, + DocStatusStats docStatusStats ) { this.indexCount = indexCount; this.indexTimeInMillis = indexTimeInMillis; @@ -107,6 +202,7 @@ public Stats( this.noopUpdateCount = noopUpdateCount; this.isThrottled = isThrottled; this.throttleTimeInMillis = throttleTimeInMillis; + this.docStatusStats = docStatusStats; } public void add(Stats stats) { @@ -121,8 +217,10 @@ public void add(Stats stats) { noopUpdateCount += stats.noopUpdateCount; throttleTimeInMillis += stats.throttleTimeInMillis; - if (isThrottled != stats.isThrottled) { - isThrottled = true; // When combining if one is throttled set result to throttled. + isThrottled |= stats.isThrottled; // When combining if one is throttled set result to throttled. 
+ + if (getDocStatusStats() != null) { + getDocStatusStats().add(stats.getDocStatusStats()); } } @@ -193,6 +291,10 @@ public long getNoopUpdateCount() { return noopUpdateCount; } + public DocStatusStats getDocStatusStats() { + return docStatusStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(indexCount); @@ -206,6 +308,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isThrottled); out.writeLong(throttleTimeInMillis); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalWriteable(docStatusStats); + } } @Override @@ -223,8 +328,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.IS_THROTTLED, isThrottled); builder.humanReadableField(Fields.THROTTLED_TIME_IN_MILLIS, Fields.THROTTLED_TIME, getThrottleTime()); + + if (getDocStatusStats() != null) { + getDocStatusStats().toXContent(builder, params); + } + return builder; } + } private final Stats totalStats; @@ -279,7 +390,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par * * @opensearch.internal */ - static final class Fields { + private static final class Fields { static final String INDEXING = "indexing"; static final String INDEX_TOTAL = "index_total"; static final String INDEX_TIME = "index_time"; @@ -294,6 +405,7 @@ static final class Fields { static final String IS_THROTTLED = "is_throttled"; static final String THROTTLED_TIME_IN_MILLIS = "throttle_time_in_millis"; static final String THROTTLED_TIME = "throttle_time"; + static final String DOC_STATUS = "doc_status"; } @Override @@ -303,4 +415,5 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } } + } diff --git a/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java b/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java index d7e15dd3e40f5..55b65bb4be6d8 100644 --- a/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java +++ b/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java @@ -154,7 +154,8 @@ IndexingStats.Stats stats(boolean isThrottled, long currentThrottleMillis) { deleteCurrent.count(), noopUpdates.count(), isThrottled, - TimeUnit.MILLISECONDS.toMillis(currentThrottleMillis) + TimeUnit.MILLISECONDS.toMillis(currentThrottleMillis), + new IndexingStats.Stats.DocStatusStats() ); } } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 6f4c51940ce87..a72142e65c5e8 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -133,6 +133,7 @@ import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.IndexingStats; +import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; import org.opensearch.index.store.remote.filecache.FileCacheCleaner; import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; @@ -1058,6 +1059,15 @@ public IndicesQueryCache getIndicesQueryCache() { return indicesQueryCache; } + /** + * Accumulate stats from the passed Object + * + * @param stats Instance storing {@link DocStatusStats} + */ + public void addDocStatusStats(final DocStatusStats stats) { + 
oldShardsStats.indexingStats.getTotal().getDocStatusStats().add(stats); + } + /** * Statistics for old shards * diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index 9ef0f85893fc8..0f67eff26cbde 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -154,6 +154,7 @@ private void indicesThatCannotBeCreatedTestCase( Settings.EMPTY, new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null) ), + null, new SystemIndices(emptyMap()) ) { @Override diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java index 5410d6a88a5b9..515f6eae28a34 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java @@ -171,6 +171,7 @@ class TestTransportBulkAction extends TransportBulkAction { SETTINGS, new ClusterService(SETTINGS, new ClusterSettings(SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null) ), + null, new SystemIndices(emptyMap()) ); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java index 098bd8e8d8cfe..10cad6fb147a2 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java @@ -60,6 +60,7 @@ import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.VersionType; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndexDescriptor; import org.opensearch.indices.SystemIndices; import org.opensearch.telemetry.tracing.noop.NoopTracer; @@ -88,6 +89,7 @@ import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; public class TransportBulkActionTests extends OpenSearchTestCase { @@ -115,6 +117,7 @@ class TestTransportBulkAction extends TransportBulkAction { new Resolver(), new AutoCreateIndex(Settings.EMPTY, clusterService.getClusterSettings(), new Resolver(), new SystemIndices(emptyMap())), new IndexingPressureService(Settings.EMPTY, clusterService), + mock(IndicesService.class), new SystemIndices(emptyMap()) ); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java index 829eee45cac5b..852e3837e1e7a 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java @@ -280,6 +280,7 @@ static class TestTransportBulkAction extends TransportBulkAction { indexNameExpressionResolver, autoCreateIndex, new IndexingPressureService(Settings.EMPTY, clusterService), + null, new SystemIndices(emptyMap()), relativeTimeProvider ); diff --git 
a/server/src/test/java/org/opensearch/core/RestStatusTests.java b/server/src/test/java/org/opensearch/core/RestStatusTests.java new file mode 100644 index 0000000000000..f8dba99aa8b60 --- /dev/null +++ b/server/src/test/java/org/opensearch/core/RestStatusTests.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.core; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.action.ShardOperationFailedException; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.PriorityQueue; + +public class RestStatusTests extends OpenSearchTestCase { + + public void testStatusReturns200ForNoFailures() { + int totalShards = randomIntBetween(1, 100); + int successfulShards = randomIntBetween(1, totalShards); + + assertEquals(RestStatus.OK, RestStatus.status(successfulShards, totalShards)); + } + + public void testStatusReturns503ForUnavailableShards() { + int totalShards = randomIntBetween(1, 100); + int successfulShards = 0; + + assertEquals(RestStatus.SERVICE_UNAVAILABLE, RestStatus.status(successfulShards, totalShards)); + } + + public void testStatusReturnsFailureStatusWhenFailuresExist() { + int totalShards = randomIntBetween(1, 100); + int successfulShards = 0; + + TestException[] failures = new TestException[totalShards]; + PriorityQueue heapOfFailures = new PriorityQueue<>((x, y) -> y.status().compareTo(x.status())); + + for (int i = 0; i < totalShards; ++i) { + /* + * Status here doesn't need to convey failure and is not as per rest + * contract. 
We're not testing the contract, but if status() returns + * the greatest rest code from the failures selection + */ + RestStatus status = randomFrom(RestStatus.values()); + TestException failure = new TestException(status); + + failures[i] = failure; + heapOfFailures.add(failure); + } + + assertEquals(heapOfFailures.peek().status(), RestStatus.status(successfulShards, totalShards, failures)); + } + + public void testSerialization() throws IOException { + final RestStatus status = randomFrom(RestStatus.values()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + RestStatus.writeTo(out, status); + + try (StreamInput in = out.bytes().streamInput()) { + RestStatus deserializedStatus = RestStatus.readFrom(in); + + assertEquals(status, deserializedStatus); + } + } + } + + private static class TestException extends ShardOperationFailedException { + TestException(final RestStatus status) { + super("super-idx", randomInt(), "gone-fishing", status, new Throwable("cake")); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new IOException("not implemented"); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + throw new IOException("not implemented"); + } + } + +} diff --git a/server/src/test/java/org/opensearch/index/shard/IndexingStatsTests.java b/server/src/test/java/org/opensearch/index/shard/IndexingStatsTests.java new file mode 100644 index 0000000000000..acf482552c260 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/IndexingStatsTests.java @@ -0,0 +1,141 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.shard; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.concurrent.atomic.AtomicLong; + +public class IndexingStatsTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + IndexingStats stats = createTestInstance(); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + stats.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + IndexingStats deserializedStats = new IndexingStats(in); + + if (stats.getTotal() == null) { + assertNull(deserializedStats.getTotal()); + return; + } + + IndexingStats.Stats totalStats = stats.getTotal(); + IndexingStats.Stats deserializedTotalStats = deserializedStats.getTotal(); + + assertEquals(totalStats.getIndexCount(), deserializedTotalStats.getIndexCount()); + assertEquals(totalStats.getIndexTime(), deserializedTotalStats.getIndexTime()); + assertEquals(totalStats.getIndexCurrent(), deserializedTotalStats.getIndexCurrent()); + assertEquals(totalStats.getIndexFailedCount(), deserializedTotalStats.getIndexFailedCount()); + assertEquals(totalStats.getDeleteCount(), deserializedTotalStats.getDeleteCount()); + assertEquals(totalStats.getDeleteTime(), deserializedTotalStats.getDeleteTime()); + assertEquals(totalStats.getDeleteCurrent(), deserializedTotalStats.getDeleteCurrent()); + assertEquals(totalStats.getNoopUpdateCount(), deserializedTotalStats.getNoopUpdateCount()); + assertEquals(totalStats.isThrottled(), deserializedTotalStats.isThrottled()); + assertEquals(totalStats.getThrottleTime(), deserializedTotalStats.getThrottleTime()); + + if (totalStats.getDocStatusStats() == null) { + assertNull(deserializedTotalStats.getDocStatusStats()); + return; + } + + IndexingStats.Stats.DocStatusStats docStatusStats = totalStats.getDocStatusStats(); + IndexingStats.Stats.DocStatusStats deserializedDocStatusStats = deserializedTotalStats.getDocStatusStats(); + + assertTrue( + Arrays.equals( + docStatusStats.getDocStatusCounter(), + deserializedDocStatusStats.getDocStatusCounter(), + Comparator.comparingLong(AtomicLong::longValue) + ) + ); + } + } + } + + public void testToXContentForIndexingStats() throws IOException { + IndexingStats stats = createTestInstance(); + IndexingStats.Stats totalStats = stats.getTotal(); + AtomicLong[] counter = totalStats.getDocStatusStats().getDocStatusCounter(); + + String expected = "{\"indexing\":{\"index_total\":" + + totalStats.getIndexCount() + + ",\"index_time_in_millis\":" + + totalStats.getIndexTime().getMillis() + + ",\"index_current\":" + + totalStats.getIndexCurrent() + + ",\"index_failed\":" + + totalStats.getIndexFailedCount() + + ",\"delete_total\":" + + totalStats.getDeleteCount() + + ",\"delete_time_in_millis\":" + + totalStats.getDeleteTime().getMillis() + + ",\"delete_current\":" + + totalStats.getDeleteCurrent() + + ",\"noop_update_total\":" + + totalStats.getNoopUpdateCount() + + ",\"is_throttled\":" + + totalStats.isThrottled() + + ",\"throttle_time_in_millis\":" + + totalStats.getThrottleTime().getMillis() + + ",\"doc_status\":{\"1xx\":" + + counter[0] + + ",\"2xx\":" + + counter[1] + + ",\"3xx\":" + + 
counter[2] + + ",\"4xx\":" + + counter[3] + + ",\"5xx\":" + + counter[4] + + "}}}"; + + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); + xContentBuilder.startObject(); + xContentBuilder = stats.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); + xContentBuilder.endObject(); + + assertEquals(expected, xContentBuilder.toString()); + } + + private IndexingStats createTestInstance() { + IndexingStats.Stats.DocStatusStats docStatusStats = new IndexingStats.Stats.DocStatusStats(); + for (int i = 1; i < 6; ++i) { + docStatusStats.add(RestStatus.fromCode(i * 100), randomNonNegativeLong()); + } + + IndexingStats.Stats stats = new IndexingStats.Stats( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomBoolean(), + randomNonNegativeLong(), + docStatusStats + ); + + return new IndexingStats(stats); + } + +} diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 2b432906ee128..09f5c1bea1a5e 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -2210,6 +2210,7 @@ public void onFailure(final Exception e) { indexNameExpressionResolver, new AutoCreateIndex(settings, clusterSettings, indexNameExpressionResolver, new SystemIndices(emptyMap())), new IndexingPressureService(settings, clusterService), + mock(IndicesService.class), new SystemIndices(emptyMap()) ) ); From 7dc6683838951e1a211c6ff634d9eea93e2be3c2 Mon Sep 17 00:00:00 2001 From: Andrey Pleskach Date: Thu, 28 Sep 2023 23:56:28 +0300 Subject: [PATCH 03/14] Fixed changelog for BC update (#10233) Signed-off-by: Andrey Pleskach --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0b3ce261d517..3d1ca935193c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -105,6 +105,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.0 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208)) - Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) - Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` ### Changed - Add instrumentation in rest and network layer. 
([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) From 61495bf0a915216f623d51807aeb3353caffd296 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Fri, 29 Sep 2023 13:40:02 -0700 Subject: [PATCH 04/14] Refactor async blob read to avoid blocking calls, support non multipart calls (#10192) Signed-off-by: Kunal Kotwani --- .../repositories/s3/S3BlobContainer.java | 89 +++++++----- .../s3/S3BlobStoreContainerTests.java | 129 +++++++++++++++--- 2 files changed, 163 insertions(+), 55 deletions(-) diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index 2911a018df337..c6ae58371e15c 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -228,35 +228,50 @@ public void readBlobAsync(String blobName, ActionListener listener) try (AmazonAsyncS3Reference amazonS3Reference = SocketAccess.doPrivileged(blobStore::asyncClientReference)) { final S3AsyncClient s3AsyncClient = amazonS3Reference.get().client(); final String bucketName = blobStore.bucket(); + final String blobKey = buildKey(blobName); - final GetObjectAttributesResponse blobMetadata = getBlobMetadata(s3AsyncClient, bucketName, blobName).get(); + final CompletableFuture blobMetadataFuture = getBlobMetadata(s3AsyncClient, bucketName, blobKey); - final long blobSize = blobMetadata.objectSize(); - final int numberOfParts = blobMetadata.objectParts().totalPartsCount(); - final String blobChecksum = blobMetadata.checksum().checksumCRC32(); - - final List blobPartStreams = new ArrayList<>(); - final List> blobPartInputStreamFutures = new ArrayList<>(); - // S3 multipart files use 1 to n indexing - for (int partNumber = 1; partNumber <= numberOfParts; partNumber++) { - blobPartInputStreamFutures.add(getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, partNumber)); - } - - CompletableFuture.allOf(blobPartInputStreamFutures.toArray(CompletableFuture[]::new)).whenComplete((unused, throwable) -> { - if (throwable == null) { - listener.onResponse( - new ReadContext( - blobSize, - blobPartInputStreamFutures.stream().map(CompletableFuture::join).collect(Collectors.toList()), - blobChecksum - ) - ); - } else { + blobMetadataFuture.whenComplete((blobMetadata, throwable) -> { + if (throwable != null) { Exception ex = throwable.getCause() instanceof Exception ? (Exception) throwable.getCause() : new Exception(throwable.getCause()); listener.onFailure(ex); + return; + } + + final List> blobPartInputStreamFutures = new ArrayList<>(); + final long blobSize = blobMetadata.objectSize(); + final Integer numberOfParts = blobMetadata.objectParts() == null ? 
null : blobMetadata.objectParts().totalPartsCount(); + final String blobChecksum = blobMetadata.checksum().checksumCRC32(); + + if (numberOfParts == null) { + blobPartInputStreamFutures.add(getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobKey, null)); + } else { + // S3 multipart files use 1 to n indexing + for (int partNumber = 1; partNumber <= numberOfParts; partNumber++) { + blobPartInputStreamFutures.add(getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobKey, partNumber)); + } } + + CompletableFuture.allOf(blobPartInputStreamFutures.toArray(CompletableFuture[]::new)) + .whenComplete((unused, partThrowable) -> { + if (partThrowable == null) { + listener.onResponse( + new ReadContext( + blobSize, + blobPartInputStreamFutures.stream().map(CompletableFuture::join).collect(Collectors.toList()), + blobChecksum + ) + ); + } else { + Exception ex = partThrowable.getCause() instanceof Exception + ? (Exception) partThrowable.getCause() + : new Exception(partThrowable.getCause()); + listener.onFailure(ex); + } + }); }); } catch (Exception ex) { listener.onFailure(SdkException.create("Error occurred while fetching blob parts from the repository", ex)); @@ -685,41 +700,47 @@ static Tuple numberOfMultiparts(final long totalSize, final long par * the stream and its related metadata. * @param s3AsyncClient Async client to be utilized to fetch the object part * @param bucketName Name of the S3 bucket - * @param blobName Identifier of the blob for which the parts will be fetched - * @param partNumber Part number for the blob to be retrieved + * @param blobKey Identifier of the blob for which the parts will be fetched + * @param partNumber Optional part number for the blob to be retrieved * @return A future of {@link InputStreamContainer} containing the stream and stream metadata. */ CompletableFuture getBlobPartInputStreamContainer( S3AsyncClient s3AsyncClient, String bucketName, - String blobName, - int partNumber + String blobKey, + @Nullable Integer partNumber ) { - final GetObjectRequest.Builder getObjectRequestBuilder = GetObjectRequest.builder() - .bucket(bucketName) - .key(blobName) - .partNumber(partNumber); + final boolean isMultipartObject = partNumber != null; + final GetObjectRequest.Builder getObjectRequestBuilder = GetObjectRequest.builder().bucket(bucketName).key(blobKey); + + if (isMultipartObject) { + getObjectRequestBuilder.partNumber(partNumber); + } return SocketAccess.doPrivileged( () -> s3AsyncClient.getObject(getObjectRequestBuilder.build(), AsyncResponseTransformer.toBlockingInputStream()) - .thenApply(S3BlobContainer::transformResponseToInputStreamContainer) + .thenApply(response -> transformResponseToInputStreamContainer(response, isMultipartObject)) ); } /** * Transforms the stream response object from S3 into an {@link InputStreamContainer} * @param streamResponse Response stream object from S3 + * @param isMultipartObject Flag to denote a multipart object response * @return {@link InputStreamContainer} containing the stream and stream metadata */ // Package-Private for testing. 
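+    // When partNumber is null, the object is fetched in one piece; S3 omits Content-Range for such reads, so the start offset defaults to 0.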
- static InputStreamContainer transformResponseToInputStreamContainer(ResponseInputStream streamResponse) { + static InputStreamContainer transformResponseToInputStreamContainer( + ResponseInputStream streamResponse, + boolean isMultipartObject + ) { final GetObjectResponse getObjectResponse = streamResponse.response(); final String contentRange = getObjectResponse.contentRange(); final Long contentLength = getObjectResponse.contentLength(); - if (contentRange == null || contentLength == null) { + if ((isMultipartObject && contentRange == null) || contentLength == null) { throw SdkException.builder().message("Failed to fetch required metadata for blob part").build(); } - final Long offset = HttpRangeUtils.getStartOffsetFromRangeHeader(getObjectResponse.contentRange()); + final long offset = isMultipartObject ? HttpRangeUtils.getStartOffsetFromRangeHeader(getObjectResponse.contentRange()) : 0L; return new InputStreamContainer(streamResponse, getObjectResponse.contentLength(), offset); } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index a87c060dcc60a..9817d7cd520ef 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -81,7 +81,6 @@ import org.opensearch.common.io.InputStreamContainer; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.unit.ByteSizeUnit; -import org.opensearch.repositories.s3.async.AsyncTransferManager; import org.opensearch.test.OpenSearchTestCase; import java.io.ByteArrayInputStream; @@ -100,7 +99,6 @@ import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -919,7 +917,7 @@ public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanNumberO testListBlobsByPrefixInLexicographicOrder(12, 2, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); } - public void testReadBlobAsync() throws Exception { + public void testReadBlobAsyncMultiPart() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); final String blobName = randomAlphaOfLengthBetween(1, 10); final String checksum = randomAlphaOfLength(10); @@ -932,11 +930,7 @@ public void testReadBlobAsync() throws Exception { final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) ); - final AsyncTransferManager asyncTransferManager = new AsyncTransferManager( - 10000L, - mock(ExecutorService.class), - mock(ExecutorService.class) - ); + final S3BlobStore blobStore = mock(S3BlobStore.class); final BlobPath blobPath = new BlobPath(); @@ -944,7 +938,6 @@ public void testReadBlobAsync() throws Exception { when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); when(blobStore.serverSideEncryption()).thenReturn(false); when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference); - when(blobStore.getAsyncTransferManager()).thenReturn(asyncTransferManager); CompletableFuture getObjectAttributesResponseCompletableFuture = new CompletableFuture<>(); 
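// The metadata future is completed before the mock hands it out, so the stubbed
// getObjectAttributes(...) resolves immediately and readBlobAsync can run to
// completion in-process, without any real S3 round trip.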
getObjectAttributesResponseCompletableFuture.complete( @@ -984,6 +977,60 @@ public void testReadBlobAsync() throws Exception { } } + public void testReadBlobAsyncSinglePart() throws Exception { + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final String blobName = randomAlphaOfLengthBetween(1, 10); + final String checksum = randomAlphaOfLength(10); + + final int objectSize = 100; + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) + ); + final S3BlobStore blobStore = mock(S3BlobStore.class); + final BlobPath blobPath = new BlobPath(); + + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.serverSideEncryption()).thenReturn(false); + when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference); + + CompletableFuture getObjectAttributesResponseCompletableFuture = new CompletableFuture<>(); + getObjectAttributesResponseCompletableFuture.complete( + GetObjectAttributesResponse.builder() + .checksum(Checksum.builder().checksumCRC32(checksum).build()) + .objectSize((long) objectSize) + .build() + ); + when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn( + getObjectAttributesResponseCompletableFuture + ); + + mockObjectResponse(s3AsyncClient, bucketName, blobName, objectSize); + + CountDownLatch countDownLatch = new CountDownLatch(1); + CountingCompletionListener readContextActionListener = new CountingCompletionListener<>(); + LatchedActionListener listener = new LatchedActionListener<>(readContextActionListener, countDownLatch); + + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + blobContainer.readBlobAsync(blobName, listener); + countDownLatch.await(); + + assertEquals(1, readContextActionListener.getResponseCount()); + assertEquals(0, readContextActionListener.getFailureCount()); + ReadContext readContext = readContextActionListener.getResponse(); + assertEquals(1, readContext.getNumberOfParts()); + assertEquals(checksum, readContext.getBlobChecksum()); + assertEquals(objectSize, readContext.getBlobSize()); + + InputStreamContainer inputStreamContainer = readContext.getPartStreams().stream().findFirst().get(); + assertEquals(objectSize, inputStreamContainer.getContentLength()); + assertEquals(0, inputStreamContainer.getOffset()); + assertEquals(objectSize, inputStreamContainer.getInputStream().readAllBytes().length); + + } + public void testReadBlobAsyncFailure() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); final String blobName = randomAlphaOfLengthBetween(1, 10); @@ -996,11 +1043,7 @@ public void testReadBlobAsyncFailure() throws Exception { final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) ); - final AsyncTransferManager asyncTransferManager = new AsyncTransferManager( - 10000L, - mock(ExecutorService.class), - mock(ExecutorService.class) - ); + final S3BlobStore blobStore = mock(S3BlobStore.class); final BlobPath blobPath = new BlobPath(); @@ -1008,7 +1051,6 @@ public void testReadBlobAsyncFailure() throws Exception { when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); when(blobStore.serverSideEncryption()).thenReturn(false); 
when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference); - when(blobStore.getAsyncTransferManager()).thenReturn(asyncTransferManager); CompletableFuture getObjectAttributesResponseCompletableFuture = new CompletableFuture<>(); getObjectAttributesResponseCompletableFuture.complete( @@ -1071,7 +1113,7 @@ public void testGetBlobPartInputStream() throws Exception { final String blobName = randomAlphaOfLengthBetween(1, 10); final String bucketName = randomAlphaOfLengthBetween(1, 10); final long contentLength = 10L; - final String contentRange = "bytes 0-10/100"; + final String contentRange = "bytes 10-20/100"; final InputStream inputStream = ResponseInputStream.nullInputStream(); final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); @@ -1095,9 +1137,17 @@ public void testGetBlobPartInputStream() throws Exception { ) ).thenReturn(getObjectPartResponse); + // Header based offset in case of a multi part object request InputStreamContainer inputStreamContainer = blobContainer.getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, 0) .get(); + assertEquals(10, inputStreamContainer.getOffset()); + assertEquals(contentLength, inputStreamContainer.getContentLength()); + assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available()); + + // 0 offset in case of a single part object request + inputStreamContainer = blobContainer.getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, null).get(); + assertEquals(0, inputStreamContainer.getOffset()); assertEquals(contentLength, inputStreamContainer.getContentLength()); assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available()); @@ -1108,28 +1158,65 @@ public void testTransformResponseToInputStreamContainer() throws Exception { final long contentLength = 10L; final InputStream inputStream = ResponseInputStream.nullInputStream(); - final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); - GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength(contentLength).build(); + // Exception when content range absent for multipart object ResponseInputStream responseInputStreamNoRange = new ResponseInputStream<>(getObjectResponse, inputStream); - assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoRange)); + assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoRange, true)); + + // No exception when content range absent for single part object + ResponseInputStream responseInputStreamNoRangeSinglePart = new ResponseInputStream<>( + getObjectResponse, + inputStream + ); + InputStreamContainer inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer( + responseInputStreamNoRangeSinglePart, + false + ); + assertEquals(contentLength, inputStreamContainer.getContentLength()); + assertEquals(0, inputStreamContainer.getOffset()); + // Exception when length is absent getObjectResponse = GetObjectResponse.builder().contentRange(contentRange).build(); ResponseInputStream responseInputStreamNoContentLength = new ResponseInputStream<>( getObjectResponse, inputStream ); - assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoContentLength)); + assertThrows( + SdkException.class, + () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoContentLength, true) + ); + // No exception when range and length both are 
present getObjectResponse = GetObjectResponse.builder().contentRange(contentRange).contentLength(contentLength).build(); ResponseInputStream responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream); - InputStreamContainer inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer(responseInputStream); + inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer(responseInputStream, true); assertEquals(contentLength, inputStreamContainer.getContentLength()); assertEquals(0, inputStreamContainer.getOffset()); assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available()); } + private void mockObjectResponse(S3AsyncClient s3AsyncClient, String bucketName, String blobName, int objectSize) { + + final InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(objectSize)); + + GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength((long) objectSize).build(); + + CompletableFuture> getObjectPartResponse = new CompletableFuture<>(); + ResponseInputStream responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream); + getObjectPartResponse.complete(responseInputStream); + + GetObjectRequest getObjectRequest = GetObjectRequest.builder().bucket(bucketName).key(blobName).build(); + + when( + s3AsyncClient.getObject( + eq(getObjectRequest), + ArgumentMatchers.>>any() + ) + ).thenReturn(getObjectPartResponse); + + } + private void mockObjectPartResponse( S3AsyncClient s3AsyncClient, String bucketName, From be247669c4d5a4494958a508c2fcde4e36c48499 Mon Sep 17 00:00:00 2001 From: Sagar <99425694+sgup432@users.noreply.github.com> Date: Fri, 29 Sep 2023 22:21:37 -0700 Subject: [PATCH 05/14] [Search latency - Coordinator] Changing version check to 2.11 (#10280) * [Search latency - Coordinator] Changing version check to 2.11 Signed-off-by: Sagar Upadhyaya * [Port main] update version check as per v2.11.0 [Backport 2.x] Indexing: add Doc status counter (#10267) * Indexing: add Doc Status Counter (#8716) Currently, OpenSearch returns a 200 OK response code for a Bulk API call even though there can be partial or complete failures within the request end-to-end. This change provides doc-level stats broken down by REST status code (2xx, 4xx, 5xx, etc.).
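For illustration, a minimal client-side sketch of the behaviour these counters capture (the `client` handle, helper class, and index name are assumptions for the example, not part of this change):

    import java.util.Map;

    import org.opensearch.action.bulk.BulkItemResponse;
    import org.opensearch.action.bulk.BulkRequest;
    import org.opensearch.action.bulk.BulkResponse;
    import org.opensearch.action.index.IndexRequest;
    import org.opensearch.client.Client;
    import org.opensearch.core.rest.RestStatus;

    class DocStatusExample {
        // Hypothetical helper; `client` is any connected org.opensearch.client.Client.
        static void indexTwoDocs(Client client) {
            BulkRequest bulk = new BulkRequest()
                .add(new IndexRequest("test").id("1").source(Map.of("field", "a")))
                .add(new IndexRequest("test").id("2").source(Map.of("field", "b")));
            // The bulk call itself answers HTTP 200 even if individual items failed.
            BulkResponse response = client.bulk(bulk).actionGet();
            for (BulkItemResponse item : response.getItems()) {
                // It is this per-item status (e.g. CREATED=201, TOO_MANY_REQUESTS=429)
                // that the doc status counters bucket into 2xx/4xx/5xx totals.
                RestStatus status = item.status();
            }
        }
    }

The per-item status, not the top-level response code, is what the 11_indices_metrics.yml assertion below observes through the nodes stats API.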
Signed-off-by: Rohit Ashiwal (cherry picked from commit d656e3db592f29466d35452867caa241f5429485) Signed-off-by: github-actions[bot] (cherry picked from commit 94173e3f8b343ba29db24e5682d1706bbda9f9a4) Signed-off-by: Rohit Ashiwal Signed-off-by: Sagar Upadhyaya --------- Signed-off-by: Sagar Upadhyaya Signed-off-by: Rohit Ashiwal Signed-off-by: github-actions[bot] --- .../rest-api-spec/test/nodes.stats/11_indices_metrics.yml | 4 ++-- .../java/org/opensearch/index/search/stats/SearchStats.java | 4 ++-- .../main/java/org/opensearch/index/shard/IndexingStats.java | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index 3f79227ce64e8..784c7b52b18b4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -141,8 +141,8 @@ --- "Metric - indexing doc_status": - skip: - version: " - 2.99.99" - reason: "To be introduced in future release :: TODO: change if/when we backport to 2.x" + version: " - 2.10.99" + reason: "Doc Status Stats were introduced in v2.11.0" - do: nodes.info: {} - set: diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index 14aaf7e58a59c..1f9144b28f286 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -242,7 +242,7 @@ private Stats(StreamInput in) throws IOException { pitCurrent = in.readVLong(); } - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_11_0)) { this.requestStatsLongHolder = new RequestStatsLongHolder(); requestStatsLongHolder.requestStatsHolder = in.readMap(StreamInput::readString, PhaseStatsLongHolder::new); } @@ -437,7 +437,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(pitCurrent); } - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_11_0)) { if (requestStatsLongHolder == null) { requestStatsLongHolder = new RequestStatsLongHolder(); } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java index f1abea81a6511..89cbc59403faf 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java @@ -172,7 +172,7 @@ public Stats(StreamInput in) throws IOException { isThrottled = in.readBoolean(); throttleTimeInMillis = in.readLong(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_11_0)) { docStatusStats = in.readOptionalWriteable(DocStatusStats::new); } else { docStatusStats = null; @@ -308,7 +308,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isThrottled); out.writeLong(throttleTimeInMillis); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_11_0)) { out.writeOptionalWriteable(docStatusStats); } } From e1565821a28109c5c7f773e77764fc5104de7f88 Mon Sep 17 00:00:00 2001 From: Heemin Kim Date: Sat, 30 Sep 2023 09:02:49 -0700 Subject: [PATCH 06/14] Pass parent filter to inner query in nested query 
(#10246) Pass parent filter to inner query so that inner query can utilize the information Signed-off-by: Heemin Kim --- CHANGELOG.md | 1 + .../index/query/NestedQueryBuilder.java | 3 +++ .../index/query/QueryShardContext.java | 9 +++++++++ .../index/query/NestedQueryBuilderTests.java | 20 +++++++++++++++++++ 4 files changed, 33 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3d1ca935193c3..f6c29d9b64f68 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -115,6 +115,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) - Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) - [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) +- Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java index 33fe96597b4f8..ac4fde7f06b16 100644 --- a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java @@ -318,10 +318,13 @@ protected Query doToQuery(QueryShardContext context) throws IOException { parentFilter = context.bitsetFilter(objectMapper.nestedTypeFilter()); } + BitSetProducer previousParentFilter = context.getParentFilter(); try { + context.setParentFilter(parentFilter); context.nestedScope().nextLevel(nestedObjectMapper); innerQuery = this.query.toQuery(context); } finally { + context.setParentFilter(previousParentFilter); context.nestedScope().previousLevel(); } diff --git a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java index 0bf05e633bba3..701484fbc8dc3 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java @@ -115,6 +115,7 @@ public class QueryShardContext extends QueryRewriteContext { private boolean mapUnmappedFieldAsString; private NestedScope nestedScope; private final ValuesSourceRegistry valuesSourceRegistry; + private BitSetProducer parentFilter; public QueryShardContext( int shardId, @@ -622,4 +623,12 @@ public BitsetFilterCache getBitsetFilterCache() { public AggregationUsageService getUsageService() { return valuesSourceRegistry.getUsageService(); } + + public BitSetProducer getParentFilter() { + return parentFilter; + } + + public void setParentFilter(BitSetProducer parentFilter) { + this.parentFilter = parentFilter; + } } diff --git a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java index 0d66654a70f08..62337264bc0b1 100644 --- a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java @@ -66,6 +66,8 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; import static 
org.mockito.Mockito.when; public class NestedQueryBuilderTests extends AbstractQueryTestCase { @@ -411,4 +413,22 @@ public void testDisallowExpensiveQueries() { OpenSearchException e = expectThrows(OpenSearchException.class, () -> queryBuilder.toQuery(queryShardContext)); assertEquals("[joining] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", e.getMessage()); } + + public void testSetParentFilterInContext() throws Exception { + QueryShardContext queryShardContext = createShardContext(); + QueryBuilder innerQueryBuilder = spy(new MatchAllQueryBuilderTests().createTestQueryBuilder()); + when(innerQueryBuilder.toQuery(queryShardContext)).thenAnswer(invoke -> { + QueryShardContext context = invoke.getArgument(0); + if (context.getParentFilter() == null) { + throw new Exception("Expect parent filter to be non-null"); + } + return invoke.callRealMethod(); + }); + NestedQueryBuilder nqb = new NestedQueryBuilder("nested1", innerQueryBuilder, RandomPicks.randomFrom(random(), ScoreMode.values())); + + assertNull(queryShardContext.getParentFilter()); + nqb.rewrite(queryShardContext).toQuery(queryShardContext); + assertNull(queryShardContext.getParentFilter()); + verify(innerQueryBuilder).toQuery(queryShardContext); + } } From 797def6e50389d88634b6ca614ceaba407ee983f Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Sat, 30 Sep 2023 18:45:30 -0700 Subject: [PATCH 07/14] Upgrade Lucene to 9.8.0 (#10276) 9.8.0 was officially released this morning: https://lucene.apache.org/core/9_8_0/changes/Changes.html Signed-off-by: Michael Froh --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - libs/core/licenses/lucene-core-9.8.0.jar.sha1 | 1 + .../licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 | 1 + .../lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 | 1 + .../lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 | 1 + .../lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 | 1 + .../lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 | 1 + .../lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 | 1 + .../lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../licenses/lucene-analysis-stempel-9.8.0.jar.sha1 | 1 + .../lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 | 1 + .../lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-analysis-common-9.8.0.jar.sha1 | 1 + .../lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 | 1 + server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-core-9.8.0.jar.sha1 | 1 + server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-grouping-9.8.0.jar.sha1 | 1 + .../licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-highlighter-9.8.0.jar.sha1 | 1 + server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-join-9.8.0.jar.sha1 | 1 + 
server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-memory-9.8.0.jar.sha1 | 1 + server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-misc-9.8.0.jar.sha1 | 1 + server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-queries-9.8.0.jar.sha1 | 1 + .../licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-queryparser-9.8.0.jar.sha1 | 1 + server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-sandbox-9.8.0.jar.sha1 | 1 + .../lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 | 1 + .../licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-spatial3d-9.8.0.jar.sha1 | 1 + server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-suggest-9.8.0.jar.sha1 | 1 + 48 files changed, 25 insertions(+), 24 deletions(-) delete mode 100644 libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 libs/core/licenses/lucene-core-9.8.0.jar.sha1 delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-core-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.8.0.jar.sha1 delete mode 100644 
server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-join-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.8.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index f6c29d9b64f68..772198d5d0544 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -107,6 +107,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` - Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` - Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) ### Changed - Add instrumentation in rest and network layer. 
([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 71b3e267700b1..dae68940a7b7f 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.8.0-snapshot-95cdd2e +lucene = 9.8.0 bundled_jdk_vendor = adoptium bundled_jdk = 20.0.2+9 diff --git a/libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 b/libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 70baf1270cd5d..0000000000000 --- a/libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d2f7fbc5b2c49ca777a169d579f41082a9a57cc7 \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.8.0.jar.sha1 b/libs/core/licenses/lucene-core-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..f9a3e2f3cbee6 --- /dev/null +++ b/libs/core/licenses/lucene-core-9.8.0.jar.sha1 @@ -0,0 +1 @@ +5e8421c5f8573bcf22e9265fc7e19469545a775a \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c1daa91dd5433..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -57e2b0cca55da8ad856dfd60be42e6daabbc98c3 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..892865a017f48 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 @@ -0,0 +1 @@ +7725476acfcb9bdfeff1b813ce15c39c6b857dc2 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 035b47c5f388c..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0deb3b85eadf831be17b48acab0785fd9d34fc44 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..ef410899981ca --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 @@ -0,0 +1 @@ +7133d34e92770f59eb28686f4d511b9f3f32e970 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 6ff5a433f0a4e..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a204267d68ce4ba36bfddc366cd6865cf5e1378 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..46b83c9e40b3a --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 @@ -0,0 +1 @@ +be44282e1f6b91a0650fcceb558053d6bdd4863d \ No newline at end of file diff --git 
a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index a65ab33a31e2a..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -71e8e811f873ba2b47c7ecf9d890cbeac5b6be41 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..36664695a7818 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 @@ -0,0 +1 @@ +bd1f80d33346f7e588685484ef29a304db5190e4 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 04ab7b7e7adb8..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6e1274273895365bd83391cc4b79f5264479f5de \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..003ccdf8b0727 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 @@ -0,0 +1 @@ +b9ffdc7a52d2087ecb03318ec06305b480cdfe82 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index cef3f97d03c51..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e634c8685edad2bdb5c13748b18c0c1a46bb63a3 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..e22eaa474016f --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 @@ -0,0 +1 @@ +f73e2007b133fb699e517ef13b4952844f0150d8 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 3e2dd19a9dd85..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0afdf2afacbae39414ed06325fbb4bed17c07a7d \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..1ebe42a2a2f56 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 @@ -0,0 +1 @@ +2c09cbc021a8f81a01600a1d2a999361e70f7aed \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file 
mode 100644 index 8c0544acd1ca0..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -166e2ea297182f7bf7070af02aacea9e6a3a19c8 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..3c4523d45c0f5 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 @@ -0,0 +1 @@ +b054f2c7b11fc7c5601b4c3cdf18aa7508612898 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 4ac89f2e792d7..0000000000000 --- a/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8c82be3d997d781bb72d6d0eadade064dd2cd6db \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..6ad304fa52c12 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 @@ -0,0 +1 @@ +36f0363325ca7bf62c180160d1ed5165c7c37795 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 624b5174a444f..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c261d17c681c0d91171c67e192abfef59adea2e \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..f104c4207d390 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 @@ -0,0 +1 @@ +e98fb408028f40170e6d87c16422bfdc0bb2e392 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 70baf1270cd5d..0000000000000 --- a/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d2f7fbc5b2c49ca777a169d579f41082a9a57cc7 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.8.0.jar.sha1 b/server/licenses/lucene-core-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..f9a3e2f3cbee6 --- /dev/null +++ b/server/licenses/lucene-core-9.8.0.jar.sha1 @@ -0,0 +1 @@ +5e8421c5f8573bcf22e9265fc7e19469545a775a \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 20ddb9ae3ef27..0000000000000 --- a/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d1cf3d6db43fad6630376ba59451f848f4d387c \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.8.0.jar.sha1 b/server/licenses/lucene-grouping-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..ab132121b2edc --- /dev/null +++ b/server/licenses/lucene-grouping-9.8.0.jar.sha1 @@ -0,0 +1 @@ +d39184518351178c404ed9669fc6cb6111f2288d \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 
b/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c3ad03ca53b13..0000000000000 --- a/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -83ab97638bb5269f950d75bba5675d3cfb63f2fa \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.8.0.jar.sha1 b/server/licenses/lucene-highlighter-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..c7cb678fb7b72 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.8.0.jar.sha1 @@ -0,0 +1 @@ +1ac38c8278dbd63dfab30744a41dd955a415a31c \ No newline at end of file diff --git a/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c2a4c5334b314..0000000000000 --- a/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -97c26362151908dc892263edda3872abbacb71a8 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.8.0.jar.sha1 b/server/licenses/lucene-join-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..2b6cb8af4faf6 --- /dev/null +++ b/server/licenses/lucene-join-9.8.0.jar.sha1 @@ -0,0 +1 @@ +3d64fc57bb6e718d906413a9f73c713e6d4d8bb0 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 32534d07e47dc..0000000000000 --- a/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8337eddc0dddd0d7dd50c5aa0d17e5e31592f9fa \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.8.0.jar.sha1 b/server/licenses/lucene-memory-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..5fdfee401dd0a --- /dev/null +++ b/server/licenses/lucene-memory-9.8.0.jar.sha1 @@ -0,0 +1 @@ +5283ac71d6ccecb5e00c7b52df2faec012f2625a \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 7db245cc521c7..0000000000000 --- a/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a2e3fae930295f0e2b401effe04eafc25692a414 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.8.0.jar.sha1 b/server/licenses/lucene-misc-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..cf815cba15862 --- /dev/null +++ b/server/licenses/lucene-misc-9.8.0.jar.sha1 @@ -0,0 +1 @@ +9a57b049cf51a5e9c9c1909c420f645f1b6f9a54 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index d01a6d733196e..0000000000000 --- a/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e88d8a464e6cfa345b946c9c8822ba7ee2a9159f \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.8.0.jar.sha1 b/server/licenses/lucene-queries-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..09f369ef18e12 --- /dev/null +++ b/server/licenses/lucene-queries-9.8.0.jar.sha1 @@ -0,0 +1 @@ +628db4ef46f1c6a05145bdac1d1bc4ace6341b13 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c7b9640bad170..0000000000000 --- 
a/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9905790675c01e8dc24f9a5e6b9b28b879c65a52 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.8.0.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..2a42a8956b18b --- /dev/null +++ b/server/licenses/lucene-queryparser-9.8.0.jar.sha1 @@ -0,0 +1 @@ +982faf2bfa55542bf57fbadef54c19ac00f57cae \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c4cd9e47624f8..0000000000000 --- a/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6c8be427ec8ffc7e8233ffbf0d190d95a56cf14 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.8.0.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..64a0b07f72d29 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.8.0.jar.sha1 @@ -0,0 +1 @@ +06493dbd14d02537716822254866a94458f4d842 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index dfee145d3ea26..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -11716d61288feaa692593bf699affa8de2b564c4 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..d1bcb0581435c --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 @@ -0,0 +1 @@ +9d9a731822ad6eefa1ba288a0c158d478522f165 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c7410086ba86c..0000000000000 --- a/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a888e06c0535403b9e58a8dcddeb5e6513a4930 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..d17459cc569a9 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 @@ -0,0 +1 @@ +ce752a52b2d4eac90633c7df7982e29504f99e76 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 6d8d4205f4d02..0000000000000 --- a/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -52dfc8bf135ed29f5baf0a967c1bb63dedb9a069 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.8.0.jar.sha1 b/server/licenses/lucene-suggest-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..ff47b87672d2c --- /dev/null +++ b/server/licenses/lucene-suggest-9.8.0.jar.sha1 @@ -0,0 +1 @@ +f977f96f2093b7fddea6b67caa2e1c5b10edebf6 \ No newline at end of file From 6003560f944d5e63943d13c679797787458f1b5f Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 2 Oct 2023 13:04:38 -0400 Subject: [PATCH 08/14] Bump asm from 9.5 to 9.6 (#10302) Signed-off-by: Andriy Redko --- CHANGELOG.md | 9 +++++---- buildSrc/version.properties | 2 +- 
modules/lang-expression/licenses/asm-9.5.jar.sha1 | 1 - modules/lang-expression/licenses/asm-9.6.jar.sha1 | 1 + .../lang-expression/licenses/asm-commons-9.5.jar.sha1 | 1 - .../lang-expression/licenses/asm-commons-9.6.jar.sha1 | 1 + modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 | 1 - modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 | 1 + modules/lang-painless/licenses/asm-9.5.jar.sha1 | 1 - modules/lang-painless/licenses/asm-9.6.jar.sha1 | 1 + modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 | 1 - modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 | 1 + modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 | 1 - modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 | 1 + modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 | 1 - modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 | 1 + modules/lang-painless/licenses/asm-util-9.5.jar.sha1 | 1 - modules/lang-painless/licenses/asm-util-9.6.jar.sha1 | 1 + 18 files changed, 14 insertions(+), 13 deletions(-) delete mode 100644 modules/lang-expression/licenses/asm-9.5.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-9.6.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-9.5.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-9.6.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-util-9.5.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-util-9.6.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 772198d5d0544..e1834df696769 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -104,10 +104,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.4 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.0 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208)) - Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) -- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 
([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump Lucene from 9.7.0 to 9.8.0 ([#10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) ### Changed - Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index dae68940a7b7f..243a1b2c6f57e 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -16,7 +16,7 @@ icu4j = 70.1 supercsv = 2.4.0 log4j = 2.20.0 slf4j = 1.7.36 -asm = 9.5 +asm = 9.6 jettison = 1.5.4 woodstox = 6.4.0 kotlin = 1.7.10 diff --git a/modules/lang-expression/licenses/asm-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-9.5.jar.sha1 deleted file mode 100644 index ea4aa3581dc87..0000000000000 --- a/modules/lang-expression/licenses/asm-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc6ea1875f4d64fbc85e1691c95b96a3d8569c90 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-9.6.jar.sha1 new file mode 100644 index 0000000000000..2d9e6a9d3cfd6 --- /dev/null +++ b/modules/lang-expression/licenses/asm-9.6.jar.sha1 @@ -0,0 +1 @@ +aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 deleted file mode 100644 index 5be792660c19f..0000000000000 --- a/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19ab5b5800a3910d30d3a3e64fdb00fd0cb42de0 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 new file mode 100644 index 0000000000000..a0814f495771f --- /dev/null +++ b/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 @@ -0,0 +1 @@ +f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 deleted file mode 100644 index fb42db6a9d15c..0000000000000 --- a/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd33c8b6373abaa675be407082fdfda35021254a \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 new file mode 100644 index 0000000000000..101eb03b4b736 --- /dev/null +++ b/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 @@ -0,0 +1 @@ +c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-9.5.jar.sha1 deleted file mode 100644 index ea4aa3581dc87..0000000000000 --- a/modules/lang-painless/licenses/asm-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc6ea1875f4d64fbc85e1691c95b96a3d8569c90 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-9.6.jar.sha1 new file mode 100644 index 0000000000000..2d9e6a9d3cfd6 --- /dev/null +++ b/modules/lang-painless/licenses/asm-9.6.jar.sha1 @@ -0,0 +1 @@ +aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of 
file diff --git a/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 deleted file mode 100644 index 9e87d3ce7d719..0000000000000 --- a/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -490bacc77de7cbc0be1a30bb3471072d705be4a4 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 new file mode 100644 index 0000000000000..fa42ea1198165 --- /dev/null +++ b/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 @@ -0,0 +1 @@ +9ce6c7b174bd997fc2552dff47964546bd7a5ec3 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 deleted file mode 100644 index 5be792660c19f..0000000000000 --- a/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19ab5b5800a3910d30d3a3e64fdb00fd0cb42de0 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 new file mode 100644 index 0000000000000..a0814f495771f --- /dev/null +++ b/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 @@ -0,0 +1 @@ +f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 deleted file mode 100644 index fb42db6a9d15c..0000000000000 --- a/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd33c8b6373abaa675be407082fdfda35021254a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 new file mode 100644 index 0000000000000..101eb03b4b736 --- /dev/null +++ b/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 @@ -0,0 +1 @@ +c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 deleted file mode 100644 index 5fffbfe655deb..0000000000000 --- a/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -64b5a1fc8c1b15ed2efd6a063e976bc8d3dc5ffe \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 new file mode 100644 index 0000000000000..1f42ac62dc69c --- /dev/null +++ b/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 @@ -0,0 +1 @@ +f77caf84eb93786a749b2baa40865b9613e3eaee \ No newline at end of file From fa66bebf88d6fc69765d5ecb26930ece94477024 Mon Sep 17 00:00:00 2001 From: Rishabh Maurya Date: Mon, 2 Oct 2023 13:31:02 -0700 Subject: [PATCH 09/14] Use of LogByteSizeMergePolicy for data stream use cases (#9992) * Configurable merge policy for index * additional setting to configure merge policy for timestamp based index * introduction of logbytesize merge policy as an option Signed-off-by: Rishabh Maurya * remove the trace log not required anymore Signed-off-by: Rishabh Maurya * Refactor the merge policy extraction logic Signed-off-by: Rishabh Maurya * Rename constant DEFAULT to DEFAULT_POLICY Signed-off-by: Rishabh Maurya * Simplify merge policy extraction and selection logic Signed-off-by: Rishabh Maurya * missing javadoc error Signed-off-by: Rishabh Maurya * Renaming log byte size policy setting with 
mb Signed-off-by: Rishabh Maurya * Move validation exception to enum from setting defn Signed-off-by: Rishabh Maurya * rename time_index to time_series_index Signed-off-by: Rishabh Maurya --------- Signed-off-by: Rishabh Maurya --- CHANGELOG.md | 1 + .../gateway/RecoveryFromGatewayIT.java | 4 +- .../RemoveCorruptedShardDataCommandIT.java | 4 +- .../index/store/CorruptedFileIT.java | 10 +- .../indices/stats/IndexStatsIT.java | 10 +- .../java/org/opensearch/update/UpdateIT.java | 4 +- .../common/settings/ClusterSettings.java | 1 + .../common/settings/IndexScopedSettings.java | 29 +- .../org/opensearch/index/IndexSettings.java | 156 ++++++- .../index/LogByteSizeMergePolicyProvider.java | 166 +++++++ .../opensearch/index/MergePolicyProvider.java | 31 ++ ...ig.java => TieredMergePolicyProvider.java} | 84 ++-- .../opensearch/index/shard/IndexShard.java | 2 +- .../segments/IndicesSegmentsRequestTests.java | 4 +- .../index/MergePolicySettingsTests.java | 421 +++++++++++++++--- .../index/MergeSchedulerSettingsTests.java | 8 +- .../RemoveCorruptedShardDataCommandTests.java | 4 +- .../indices/recovery/RecoveryTests.java | 4 +- .../test/OpenSearchIntegTestCase.java | 4 +- 19 files changed, 776 insertions(+), 171 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java create mode 100644 server/src/main/java/org/opensearch/index/MergePolicyProvider.java rename server/src/main/java/org/opensearch/index/{MergePolicyConfig.java => TieredMergePolicyProvider.java} (82%) diff --git a/CHANGELOG.md b/CHANGELOG.md index e1834df696769..7519e242c0acf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) - Implement Visitor Design pattern in QueryBuilder to enable the capability to traverse through the complex QueryBuilder tree. 
([#10110](https://github.com/opensearch-project/OpenSearch/pull/10110)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) +- Configurable merge policy for index with an option to choose from LogByteSize and Tiered merge policy ([#9992](https://github.com/opensearch-project/OpenSearch/pull/9992)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index 2bab61f3e1c4c..229cd7bffad2f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -53,7 +53,7 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.Engine; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.shard.ShardPath; @@ -519,7 +519,7 @@ public void testReuseInFileBasedPeerRecovery() throws Exception { .put("number_of_replicas", 1) // disable merges to keep segments the same - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // expire retention leases quickly .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java index f8c2acbf99f70..b431079476624 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -73,7 +73,7 @@ import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.GatewayMetaState; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.MockEngineFactoryPlugin; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.translog.TestTranslog; @@ -135,7 +135,7 @@ public void testCorruptIndex() throws Exception { Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1") .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "checksum") diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java index 7e1d0792e3ddb..8291fef5d177b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java @@ -72,7 +72,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; -import 
org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; @@ -167,7 +167,7 @@ public void testCorruptFileAndRecover() throws ExecutionException, InterruptedEx Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // no checkindex - we corrupt shards on purpose .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no translog based flush - it might change the .liv / segments.N files @@ -286,7 +286,7 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on // purpose // no translog based flush - it might change the .liv / segments.N files @@ -552,7 +552,7 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // no checkindex - we corrupt shards on purpose .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no translog based flush - it might change the .liv / segments.N files @@ -624,7 +624,7 @@ public void testReplicaCorruption() throws Exception { prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, cluster().numDataNodes() - 1) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on // purpose .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java index a0f01acd1f8e9..0967acb37d3e8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java @@ -66,8 +66,8 @@ import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; import org.opensearch.index.MergeSchedulerConfig; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.VersionType; import org.opensearch.index.cache.query.QueryCacheStats; import org.opensearch.index.engine.VersionConflictEngineException; @@ -589,8 +589,8 @@ public void testNonThrottleStats() throws Exception { prepareCreate("test").setSettings( settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - 
.put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10000") ) @@ -621,8 +621,8 @@ public void testThrottleStats() throws Exception { prepareCreate("test").setSettings( settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "1") .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC.name()) diff --git a/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java index 442268d513fc3..b46d27bafb2a5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java @@ -50,7 +50,7 @@ import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.action.ActionListener; import org.opensearch.core.rest.RestStatus; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.DocumentMissingException; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.plugins.Plugin; @@ -669,7 +669,7 @@ public void run() { public void testStressUpdateDeleteConcurrency() throws Exception { // We create an index with merging disabled so that deletes don't get merged away - assertAcked(prepareCreate("test").setSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false))); + assertAcked(prepareCreate("test").setSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false))); ensureGreen(); Script fieldIncScript = new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, Collections.singletonMap("field", "field")); diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 032027384f106..5261d40387dc6 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -457,6 +457,7 @@ public void apply(Settings value, Settings current, Settings previous) { NetworkService.TCP_CONNECT_TIMEOUT, IndexSettings.QUERY_STRING_ANALYZE_WILDCARD, IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD, + IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY, ScriptService.SCRIPT_GENERAL_CACHE_SIZE_SETTING, ScriptService.SCRIPT_GENERAL_CACHE_EXPIRE_SETTING, ScriptService.SCRIPT_GENERAL_MAX_COMPILATIONS_RATE_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 
5b2afc44600bd..83bf8c82ee3dd 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -45,9 +45,11 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexSortConfig; import org.opensearch.index.IndexingSlowLog; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.LogByteSizeMergePolicyProvider; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.MergeSchedulerConfig; import org.opensearch.index.SearchSlowLog; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.cache.bitset.BitsetFilterCache; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.fielddata.IndexFieldDataService; @@ -120,14 +122,14 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING, - MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, IndexSortConfig.INDEX_SORT_FIELD_SETTING, IndexSortConfig.INDEX_SORT_ORDER_SETTING, IndexSortConfig.INDEX_SORT_MISSING_SETTING, @@ -202,6 +204,13 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED, IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME, IndexSettings.INDEX_MERGE_ON_FLUSH_POLICY, + IndexSettings.INDEX_MERGE_POLICY, + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING, IndexSettings.DEFAULT_SEARCH_PIPELINE, // Settings for Searchable Snapshots @@ -275,7 +284,7 @@ public boolean isPrivateSetting(String key) { case IndexMetadata.SETTING_HISTORY_UUID: case IndexMetadata.SETTING_VERSION_UPGRADED: case IndexMetadata.SETTING_INDEX_PROVIDED_NAME: - case MergePolicyConfig.INDEX_MERGE_ENABLED: + case MergePolicyProvider.INDEX_MERGE_ENABLED: // we keep the shrink settings for BWC - this can be removed in 8.0 // we can't remove in 7 since this setting 
might be baked into an index coming in via a full cluster restart from 6.0 case "index.shrink.source.uuid": diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 1e4224c314f05..ce6c1a5ad6284 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -54,6 +54,7 @@ import org.opensearch.node.Node; import org.opensearch.search.pipeline.SearchPipelineService; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -83,9 +84,42 @@ */ @PublicApi(since = "1.0.0") public final class IndexSettings { - private static final String MERGE_ON_FLUSH_DEFAULT_POLICY = "default"; + private static final String DEFAULT_POLICY = "default"; private static final String MERGE_ON_FLUSH_MERGE_POLICY = "merge-on-flush"; + /** + * Enum representing supported merge policies + */ + public enum IndexMergePolicy { + TIERED("tiered"), + LOG_BYTE_SIZE("log_byte_size"), + DEFAULT_POLICY(IndexSettings.DEFAULT_POLICY); + + private final String value; + + IndexMergePolicy(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + public static IndexMergePolicy fromString(String text) { + for (IndexMergePolicy policy : IndexMergePolicy.values()) { + if (policy.value.equals(text)) { + return policy; + } + } + throw new IllegalArgumentException( + "The setting has unsupported policy specified: " + + text + + ". Please use one of: " + + String.join(", ", Arrays.stream(IndexMergePolicy.values()).map(IndexMergePolicy::getValue).toArray(String[]::new)) + ); + } + } + public static final Setting> DEFAULT_FIELD_SETTING = Setting.listSetting( "index.query.default_field", Collections.singletonList("*"), @@ -566,11 +600,25 @@ public final class IndexSettings { public static final Setting INDEX_MERGE_ON_FLUSH_POLICY = Setting.simpleString( "index.merge_on_flush.policy", - MERGE_ON_FLUSH_DEFAULT_POLICY, + DEFAULT_POLICY, Property.IndexScope, Property.Dynamic ); + public static final Setting INDEX_MERGE_POLICY = Setting.simpleString( + "index.merge.policy", + DEFAULT_POLICY, + IndexMergePolicy::fromString, + Property.IndexScope + ); + + public static final Setting TIME_SERIES_INDEX_MERGE_POLICY = Setting.simpleString( + "indices.time_series_index.default_index_merge_policy", + DEFAULT_POLICY, + IndexMergePolicy::fromString, + Property.NodeScope + ); + public static final Setting SEARCHABLE_SNAPSHOT_REPOSITORY = Setting.simpleString( "index.searchable_snapshot.repository", Property.IndexScope, @@ -651,7 +699,8 @@ public final class IndexSettings { private volatile ByteSizeValue generationThresholdSize; private volatile ByteSizeValue flushAfterMergeThresholdSize; private final MergeSchedulerConfig mergeSchedulerConfig; - private final MergePolicyConfig mergePolicyConfig; + private final TieredMergePolicyProvider tieredMergePolicyProvider; + private final LogByteSizeMergePolicyProvider logByteSizeMergePolicyProvider; private final IndexSortConfig indexSortConfig; private final IndexScopedSettings scopedSettings; private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis(); @@ -844,7 +893,8 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti maxAnalyzedOffset = scopedSettings.get(MAX_ANALYZED_OFFSET_SETTING); maxTermsCount = scopedSettings.get(MAX_TERMS_COUNT_SETTING); maxRegexLength = scopedSettings.get(MAX_REGEX_LENGTH_SETTING); - this.mergePolicyConfig 
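/*
 * A short sketch of how the new merge-policy values resolve through IndexMergePolicy.fromString
 * defined above; only "tiered", "log_byte_size", and "default" are accepted, and anything else
 * fails fast when the setting is parsed ("zipf" below is a made-up value, used only to show the
 * failure mode):
 *
 *   IndexSettings.IndexMergePolicy tiered = IndexSettings.IndexMergePolicy.fromString("tiered");
 *   IndexSettings.IndexMergePolicy logByteSize = IndexSettings.IndexMergePolicy.fromString("log_byte_size");
 *   // IndexSettings.IndexMergePolicy.fromString("zipf") -> throws IllegalArgumentException
 */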
= new MergePolicyConfig(logger, this); + this.tieredMergePolicyProvider = new TieredMergePolicyProvider(logger, this); + this.logByteSizeMergePolicyProvider = new LogByteSizeMergePolicyProvider(logger, this); this.indexSortConfig = new IndexSortConfig(this); searchIdleAfter = scopedSettings.get(INDEX_SEARCH_IDLE_AFTER); defaultPipeline = scopedSettings.get(DEFAULT_PIPELINE); @@ -866,33 +916,59 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti * Now this sortField (IndexSort) is stored in SegmentInfo and we need to maintain backward compatibility for them. */ widenIndexSortType = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).before(V_2_7_0); - - scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, - mergePolicyConfig::setDeletesPctAllowed + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING, + tieredMergePolicyProvider::setNoCFSRatio ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, - mergePolicyConfig::setExpungeDeletesAllowed + TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, + tieredMergePolicyProvider::setDeletesPctAllowed ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, - mergePolicyConfig::setFloorSegmentSetting + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, + tieredMergePolicyProvider::setExpungeDeletesAllowed ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, - mergePolicyConfig::setMaxMergesAtOnce + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, + tieredMergePolicyProvider::setFloorSegmentSetting ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, - mergePolicyConfig::setMaxMergedSegment + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, + tieredMergePolicyProvider::setMaxMergesAtOnce ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, - mergePolicyConfig::setSegmentsPerTier + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, + tieredMergePolicyProvider::setMaxMergedSegment + ); + scopedSettings.addSettingsUpdateConsumer( + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, + tieredMergePolicyProvider::setSegmentsPerTier ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING, + logByteSizeMergePolicyProvider::setLBSMergeFactor + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING, + logByteSizeMergePolicyProvider::setLBSMinMergedMB + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeSegment + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeMBForForcedMerge + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeDocs + ); + 
scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING, + logByteSizeMergePolicyProvider::setLBSNoCFSRatio + ); scopedSettings.addSettingsUpdateConsumer( MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING, @@ -1439,9 +1515,43 @@ public long getGcDeletesInMillis() { /** * Returns the merge policy that should be used for this index. - */ - public MergePolicy getMergePolicy() { - return mergePolicyConfig.getMergePolicy(); + * @param isTimeSeriesIndex true if index contains @timestamp field + */ + public MergePolicy getMergePolicy(boolean isTimeSeriesIndex) { + String indexScopedPolicy = scopedSettings.get(INDEX_MERGE_POLICY); + MergePolicyProvider mergePolicyProvider = null; + IndexMergePolicy indexMergePolicy = IndexMergePolicy.fromString(indexScopedPolicy); + switch (indexMergePolicy) { + case TIERED: + mergePolicyProvider = tieredMergePolicyProvider; + break; + case LOG_BYTE_SIZE: + mergePolicyProvider = logByteSizeMergePolicyProvider; + break; + case DEFAULT_POLICY: + if (isTimeSeriesIndex) { + String nodeScopedTimeSeriesIndexPolicy = TIME_SERIES_INDEX_MERGE_POLICY.get(nodeSettings); + IndexMergePolicy nodeMergePolicy = IndexMergePolicy.fromString(nodeScopedTimeSeriesIndexPolicy); + switch (nodeMergePolicy) { + case TIERED: + case DEFAULT_POLICY: + mergePolicyProvider = tieredMergePolicyProvider; + break; + case LOG_BYTE_SIZE: + mergePolicyProvider = logByteSizeMergePolicyProvider; + break; + } + } else { + mergePolicyProvider = tieredMergePolicyProvider; + } + break; + } + assert mergePolicyProvider != null : "should not happen as validation for invalid merge policy values " + + "are part of setting definition"; + if (logger.isTraceEnabled()) { + logger.trace("Index: " + this.index.getName() + ", Merge policy used: " + mergePolicyProvider); + } + return mergePolicyProvider.getMergePolicy(); } public T getValue(Setting setting) { @@ -1632,7 +1742,7 @@ public boolean isMergeOnFlushEnabled() { } private void setMergeOnFlushPolicy(String policy) { - if (Strings.isEmpty(policy) || MERGE_ON_FLUSH_DEFAULT_POLICY.equalsIgnoreCase(policy)) { + if (Strings.isEmpty(policy) || DEFAULT_POLICY.equalsIgnoreCase(policy)) { mergeOnFlushPolicy = null; } else if (MERGE_ON_FLUSH_MERGE_POLICY.equalsIgnoreCase(policy)) { this.mergeOnFlushPolicy = MergeOnFlushMergePolicy::new; @@ -1643,7 +1753,7 @@ private void setMergeOnFlushPolicy(String policy) { + " has unsupported policy specified: " + policy + ". Please use one of: " - + MERGE_ON_FLUSH_DEFAULT_POLICY + + DEFAULT_POLICY + ", " + MERGE_ON_FLUSH_MERGE_POLICY ); diff --git a/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java b/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java new file mode 100644 index 0000000000000..0b762d781957c --- /dev/null +++ b/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java @@ -0,0 +1,166 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.LogByteSizeMergePolicy; +import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.NoMergePolicy; +import org.opensearch.common.settings.Setting; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; + +import static org.apache.lucene.index.LogMergePolicy.DEFAULT_MAX_MERGE_DOCS; +import static org.apache.lucene.index.LogMergePolicy.DEFAULT_NO_CFS_RATIO; + +/** + *
<p>
+ * The LogByteSizeMergePolicy is an alternative merge policy used here primarily to optimize the merging of segments in + * indexes that contain timestamps. + * While the TieredMergePolicy is the default choice, the LogByteSizeMergePolicy can be configured + * as the default merge policy for time-series indexes using the indices.time_series_index.default_index_merge_policy setting. + * + *
<p>
+ * Unlike the TieredMergePolicy, which prioritizes merging segments of equal sizes, the LogByteSizeMergePolicy + * specializes in merging adjacent segments efficiently. + * This characteristic makes it particularly well-suited for range queries on time-series data. + * Adjacent segments in time-series data typically contain documents with similar timestamps. + * When these segments are merged, the resulting segment covers a range of timestamps with reduced overlap compared + * to the adjacent segments. This reduced overlap persists even as segments grow older and larger, + * which can significantly benefit range queries on timestamps. + *
<p>
+ * In contrast, the TieredMergePolicy does not honor this timestamp range optimization. It focuses on merging segments + * of equal sizes and does not consider adjacency. Consequently, as segments grow older and larger, + * the overlap of timestamp ranges among adjacent segments managed by TieredMergePolicy can increase. + * This can lead to inefficiencies in range queries on timestamps, as the number of segments to be scanned + * within a given timestamp range could become high. + * + * @opensearch.internal + */ +public class LogByteSizeMergePolicyProvider implements MergePolicyProvider { + private final LogByteSizeMergePolicy logByteSizeMergePolicy = new LogByteSizeMergePolicy(); + + private final Logger logger; + private final boolean mergesEnabled; + + public static final ByteSizeValue DEFAULT_MIN_MERGE = new ByteSizeValue(2, ByteSizeUnit.MB); + public static final int DEFAULT_MERGE_FACTOR = 10; + + public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); + + public static final ByteSizeValue DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE = new ByteSizeValue(Long.MAX_VALUE); + + public static final Setting INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING = Setting.intSetting( + "index.merge.log_byte_size_policy.merge_factor", + DEFAULT_MERGE_FACTOR, // keeping it same as default max merge at once for tiered merge policy + 2, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.min_merge", + DEFAULT_MIN_MERGE, // keeping it same as default floor segment for tiered merge policy + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MAX_MERGE_SEGMENT_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.max_merge_segment", + DEFAULT_MAX_MERGED_SEGMENT, // keeping default same as tiered merge policy + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.max_merge_segment_forced_merge", + DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MAX_MERGED_DOCS_SETTING = Setting.intSetting( + "index.merge.log_byte_size_policy.max_merged_docs", + DEFAULT_MAX_MERGE_DOCS, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_NO_CFS_RATIO_SETTING = new Setting<>( + "index.merge.log_byte_size_policy.no_cfs_ratio", + Double.toString(DEFAULT_NO_CFS_RATIO), + TieredMergePolicyProvider::parseNoCFSRatio, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + LogByteSizeMergePolicyProvider(Logger logger, IndexSettings indexSettings) { + this.logger = logger; + this.mergesEnabled = indexSettings.getSettings().getAsBoolean(INDEX_MERGE_ENABLED, true); + + // Undocumented settings, works great with defaults + logByteSizeMergePolicy.setMergeFactor(indexSettings.getValue(INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING)); + logByteSizeMergePolicy.setMinMergeMB(indexSettings.getValue(INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING).getMbFrac()); + logByteSizeMergePolicy.setMaxMergeMB(indexSettings.getValue(INDEX_LBS_MAX_MERGE_SEGMENT_SETTING).getMbFrac()); + logByteSizeMergePolicy.setMaxMergeMBForForcedMerge( + 
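/*
 * A minimal usage sketch, not part of the patch itself: opting a timestamped index into this
 * policy at creation time. The setting keys are the ones introduced above; the index name and
 * the client object are illustrative placeholders:
 *
 *   CreateIndexRequest request = new CreateIndexRequest("logs-2023-09-28").settings(
 *       Settings.builder()
 *           .put("index.merge.policy", "log_byte_size")
 *           .put("index.merge.log_byte_size_policy.merge_factor", 10)
 *           .build()
 *   );
 *   client.indices().create(request, RequestOptions.DEFAULT);
 */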
indexSettings.getValue(INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING).getMbFrac() + ); + logByteSizeMergePolicy.setMaxMergeDocs(indexSettings.getValue(INDEX_LBS_MAX_MERGED_DOCS_SETTING)); + logByteSizeMergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_LBS_NO_CFS_RATIO_SETTING)); + } + + @Override + public MergePolicy getMergePolicy() { + return mergesEnabled ? logByteSizeMergePolicy : NoMergePolicy.INSTANCE; + } + + void setLBSMergeFactor(int mergeFactor) { + logByteSizeMergePolicy.setMergeFactor(mergeFactor); + } + + void setLBSMaxMergeSegment(ByteSizeValue maxMergeSegment) { + logByteSizeMergePolicy.setMaxMergeMB(maxMergeSegment.getMbFrac()); + } + + void setLBSMinMergedMB(ByteSizeValue minMergedSize) { + logByteSizeMergePolicy.setMinMergeMB(minMergedSize.getMbFrac()); + } + + void setLBSMaxMergeMBForForcedMerge(ByteSizeValue maxMergeForcedMerge) { + logByteSizeMergePolicy.setMaxMergeMBForForcedMerge(maxMergeForcedMerge.getMbFrac()); + } + + void setLBSMaxMergeDocs(int maxMergeDocs) { + logByteSizeMergePolicy.setMaxMergeDocs(maxMergeDocs); + } + + void setLBSNoCFSRatio(Double noCFSRatio) { + logByteSizeMergePolicy.setNoCFSRatio(noCFSRatio); + } + + @Override + public String toString() { + return "LogByteSizeMergePolicyProvider{" + + "mergeFactor=" + + logByteSizeMergePolicy.getMergeFactor() + + ", minMergeMB=" + + logByteSizeMergePolicy.getMinMergeMB() + + ", maxMergeMB=" + + logByteSizeMergePolicy.getMaxMergeMB() + + ", maxMergeMBForForcedMerge=" + + logByteSizeMergePolicy.getMaxMergeMBForForcedMerge() + + ", maxMergedDocs=" + + logByteSizeMergePolicy.getMaxMergeDocs() + + ", noCFSRatio=" + + logByteSizeMergePolicy.getNoCFSRatio() + + '}'; + } + +} diff --git a/server/src/main/java/org/opensearch/index/MergePolicyProvider.java b/server/src/main/java/org/opensearch/index/MergePolicyProvider.java new file mode 100644 index 0000000000000..6f734314f758f --- /dev/null +++ b/server/src/main/java/org/opensearch/index/MergePolicyProvider.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.apache.lucene.index.MergePolicy; +import org.opensearch.common.annotation.InternalApi; + +/** + * A provider for obtaining merge policies used by OpenSearch indexes. + * + * @opensearch.internal + */ + +@InternalApi +public interface MergePolicyProvider { + // don't convert to Setting<> and register... we only set this in tests and register via a plugin + String INDEX_MERGE_ENABLED = "index.merge.enabled"; + + /** + * Gets the merge policy to be used for index. + * + * @return The merge policy instance. 
+ */ + MergePolicy getMergePolicy(); +} diff --git a/server/src/main/java/org/opensearch/index/MergePolicyConfig.java b/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java similarity index 82% rename from server/src/main/java/org/opensearch/index/MergePolicyConfig.java rename to server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java index fe2af21dfe039..d5d354c6c960a 100644 --- a/server/src/main/java/org/opensearch/index/MergePolicyConfig.java +++ b/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java @@ -33,6 +33,7 @@ package org.opensearch.index; import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.TieredMergePolicy; @@ -47,9 +48,12 @@ * where the index data is stored, and are immutable up to delete markers. * Segments are, periodically, merged into larger segments to keep the * index size at bay and expunge deletes. + * This class customizes and exposes two merge policies from Lucene: + * {@link LogByteSizeMergePolicy} and {@link TieredMergePolicy}. + * + * *
<p>
- Merges select segments of approximately equal size, subject to an allowed + * The tiered merge policy selects segments of approximately equal size, subject to an allowed number of segments per tier. The merge policy is able to merge * non-adjacent segments, and separates how many segments are merged at once from how many * segments are allowed per tier. It also does not over-merge (i.e., cascade merges). @@ -125,8 +129,9 @@ * @opensearch.internal */ -public final class MergePolicyConfig { - private final OpenSearchTieredMergePolicy mergePolicy = new OpenSearchTieredMergePolicy(); +public final class TieredMergePolicyProvider implements MergePolicyProvider { + private final OpenSearchTieredMergePolicy tieredMergePolicy = new OpenSearchTieredMergePolicy(); + + private final Logger logger; private final boolean mergesEnabled; @@ -137,10 +142,11 @@ public final class MergePolicyConfig { public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d; public static final double DEFAULT_DELETES_PCT_ALLOWED = 20.0d; + public static final Setting INDEX_COMPOUND_FORMAT_SETTING = new Setting<>( "index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), - MergePolicyConfig::parseNoCFSRatio, + TieredMergePolicyProvider::parseNoCFSRatio, Property.Dynamic, Property.IndexScope ); @@ -194,10 +200,8 @@ public final class MergePolicyConfig { Property.Dynamic, Property.IndexScope ); - // don't convert to Setting<> and register... we only set this in tests and register via a plugin - public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; - MergePolicyConfig(Logger logger, IndexSettings indexSettings) { + TieredMergePolicyProvider(Logger logger, IndexSettings indexSettings) { this.logger = logger; double forceMergeDeletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); // percentage ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING); @@ -216,54 +220,41 @@ public final class MergePolicyConfig { ); } maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier); - mergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING)); - mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed); - mergePolicy.setFloorSegmentMB(floorSegment.getMbFrac()); - mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); - mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); - mergePolicy.setSegmentsPerTier(segmentsPerTier); - mergePolicy.setDeletesPctAllowed(deletesPctAllowed); - if (logger.isTraceEnabled()) { - logger.trace( - "using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}]," - + " max_merge_at_once[{}], max_merged_segment[{}], segments_per_tier[{}]," - + " deletes_pct_allowed[{}]", - forceMergeDeletesPctAllowed, - floorSegment, - maxMergeAtOnce, - maxMergedSegment, - segmentsPerTier, - deletesPctAllowed - ); - } + tieredMergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING)); + tieredMergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed); + tieredMergePolicy.setFloorSegmentMB(floorSegment.getMbFrac()); + tieredMergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); + tieredMergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); + tieredMergePolicy.setSegmentsPerTier(segmentsPerTier); + tieredMergePolicy.setDeletesPctAllowed(deletesPctAllowed); } void setSegmentsPerTier(Double segmentsPerTier) { -
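/*
 * For comparison with the log_byte_size example earlier, a sketch of tuning the tiered policy
 * described in the javadoc above, using the index-scoped keys this class registers (the values
 * are illustrative, not recommendations):
 *
 *   Settings tieredTuning = Settings.builder()
 *       .put("index.merge.policy", "tiered")
 *       .put("index.merge.policy.segments_per_tier", 8.0d)
 *       .put("index.merge.policy.max_merged_segment", "5gb")
 *       .build();
 */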
mergePolicy.setSegmentsPerTier(segmentsPerTier); + tieredMergePolicy.setSegmentsPerTier(segmentsPerTier); } void setMaxMergedSegment(ByteSizeValue maxMergedSegment) { - mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); + tieredMergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); } void setMaxMergesAtOnce(Integer maxMergeAtOnce) { - mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); + tieredMergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); } void setFloorSegmentSetting(ByteSizeValue floorSegementSetting) { - mergePolicy.setFloorSegmentMB(floorSegementSetting.getMbFrac()); + tieredMergePolicy.setFloorSegmentMB(floorSegementSetting.getMbFrac()); } void setExpungeDeletesAllowed(Double value) { - mergePolicy.setForceMergeDeletesPctAllowed(value); + tieredMergePolicy.setForceMergeDeletesPctAllowed(value); } void setNoCFSRatio(Double noCFSRatio) { - mergePolicy.setNoCFSRatio(noCFSRatio); + tieredMergePolicy.setNoCFSRatio(noCFSRatio); } void setDeletesPctAllowed(Double deletesPctAllowed) { - mergePolicy.setDeletesPctAllowed(deletesPctAllowed); + tieredMergePolicy.setDeletesPctAllowed(deletesPctAllowed); } private int adjustMaxMergeAtOnceIfNeeded(int maxMergeAtOnce, double segmentsPerTier) { @@ -285,11 +276,11 @@ private int adjustMaxMergeAtOnceIfNeeded(int maxMergeAtOnce, double segmentsPerT return maxMergeAtOnce; } - MergePolicy getMergePolicy() { - return mergesEnabled ? mergePolicy : NoMergePolicy.INSTANCE; + public MergePolicy getMergePolicy() { + return mergesEnabled ? tieredMergePolicy : NoMergePolicy.INSTANCE; } - private static double parseNoCFSRatio(String noCFSRatio) { + public static double parseNoCFSRatio(String noCFSRatio) { noCFSRatio = noCFSRatio.trim(); if (noCFSRatio.equalsIgnoreCase("true")) { return 1.0d; @@ -310,4 +301,23 @@ private static double parseNoCFSRatio(String noCFSRatio) { } } } + + @Override + public String toString() { + return "TieredMergePolicyProvider{" + + "expungeDeletesAllowed=" + + tieredMergePolicy.getForceMergeDeletesPctAllowed() + + ", floorSegment=" + + tieredMergePolicy.getFloorSegmentMB() + + ", maxMergeAtOnce=" + + tieredMergePolicy.getMaxMergeAtOnce() + + ", maxMergedSegment=" + + tieredMergePolicy.getMaxMergedSegmentMB() + + ", segmentsPerTier=" + + tieredMergePolicy.getSegmentsPerTier() + + ", deletesPctAllowed=" + + tieredMergePolicy.getDeletesPctAllowed() + + '}'; + } + } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index d476e8b7c9288..5ce066b156775 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -3772,7 +3772,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro indexSettings, warmer, store, - indexSettings.getMergePolicy(), + indexSettings.getMergePolicy(isTimeSeriesIndex), mapperService != null ? 
mapperService.indexAnalyzer() : null, similarityService.similarity(mapperService), engineConfigFactory.newCodecServiceOrDefault(indexSettings, mapperService, logger, codecService), diff --git a/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java index 67846efab2af8..d35c821b41aa0 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java @@ -34,7 +34,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.settings.Settings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.indices.IndexClosedException; import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalSettingsPlugin; @@ -56,7 +56,7 @@ protected Collection> getPlugins() { public void setupIndex() { Settings settings = Settings.builder() // don't allow any merges so that the num docs is the expected segments - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .build(); createIndex("test", settings); diff --git a/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java b/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java index 387997892ee30..32c4c048d77ba 100644 --- a/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java +++ b/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java @@ -31,6 +31,7 @@ package org.opensearch.index; +import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; @@ -49,17 +50,17 @@ public class MergePolicySettingsTests extends OpenSearchTestCase { protected final ShardId shardId = new ShardId("index", "_na_", 1); public void testCompoundFileSettings() throws IOException { - assertThat(new MergePolicyConfig(logger, indexSettings(Settings.EMPTY)).getMergePolicy().getNoCFSRatio(), equalTo(0.1)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(true))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0.5))).getMergePolicy().getNoCFSRatio(), equalTo(0.5)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(1.0))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0.0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, 
indexSettings(Settings.EMPTY)).getMergePolicy().getNoCFSRatio(), equalTo(0.1)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(true))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0.5))).getMergePolicy().getNoCFSRatio(), equalTo(0.5)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(1.0))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0.0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); } private static IndexSettings indexSettings(Settings settings) { @@ -67,33 +68,197 @@ private static IndexSettings indexSettings(Settings settings) { } public void testNoMerges() { - MergePolicyConfig mp = new MergePolicyConfig( + TieredMergePolicyProvider tmp = new TieredMergePolicyProvider( logger, - indexSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build()) + indexSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false).build()) ); - assertTrue(mp.getMergePolicy() instanceof NoMergePolicy); + LogByteSizeMergePolicyProvider lbsmp = new LogByteSizeMergePolicyProvider( + logger, + indexSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false).build()) + ); + assertTrue(tmp.getMergePolicy() instanceof NoMergePolicy); + assertTrue(lbsmp.getMergePolicy() instanceof NoMergePolicy); } public void testUpdateSettings() throws IOException { - IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); - assertThat(indexSettings.getMergePolicy().getNoCFSRatio(), equalTo(0.1)); + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + IndexSettings indexSettings = indexSettings(settings); + assertThat(indexSettings.getMergePolicy(false).getNoCFSRatio(), equalTo(0.1)); indexSettings = indexSettings(build(0.9)); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.9)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.9)); indexSettings.updateIndexMetadata(newIndexMeta("index", build(0.1))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.1)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.1)); indexSettings.updateIndexMetadata(newIndexMeta("index", build(0.0))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.0)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.0)); indexSettings.updateIndexMetadata(newIndexMeta("index", build("true"))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(1.0)); + 
assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(1.0)); indexSettings.updateIndexMetadata(newIndexMeta("index", build("false"))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.0)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.0)); + } + + public void testDefaultMergePolicy() throws IOException { + IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + } + + public void testMergePolicyPrecedence() throws IOException { + // 1. INDEX_MERGE_POLICY is not set + // assert defaults + IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 1.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as log_byte_size + // assert index policy is tiered whereas time series index policy is log_byte_size + Settings nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build(); + indexSettings = new IndexSettings(newIndexMeta("test", Settings.EMPTY), nodeSettings); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + // 1.2 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as tiered + // assert both index and time series index policy is tiered + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + indexSettings = new IndexSettings(newIndexMeta("test", Settings.EMPTY), nodeSettings); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 2. INDEX_MERGE_POLICY set as tiered + // assert both index and time-series-index merge policy is set as tiered + indexSettings = indexSettings( + Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()).build() + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 2.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as log_byte_size + // assert both index and time-series-index merge policy is set as tiered + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build(); + indexSettings = new IndexSettings( + newIndexMeta( + "test", + Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()).build() + ), + nodeSettings + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 3. 
INDEX_MERGE_POLICY set as log_byte_size + // assert both index and time-series-index merge policy is set as log_byte_size + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof LogByteSizeMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + // 3.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as tiered + // assert both index and time-series-index merge policy is set as log_byte_size + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + indexSettings = new IndexSettings( + newIndexMeta( + "test", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ), + nodeSettings + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof LogByteSizeMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + } + + public void testInvalidMergePolicy() throws IOException { + + final Settings invalidSettings = Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "invalid").build(); + IllegalArgumentException exc1 = expectThrows( + IllegalArgumentException.class, + () -> IndexSettings.INDEX_MERGE_POLICY.get(invalidSettings) + ); + assertThat(exc1.getMessage(), containsString(" has unsupported policy specified: ")); + IllegalArgumentException exc2 = expectThrows( + IllegalArgumentException.class, + () -> indexSettings(invalidSettings).getMergePolicy(false) + ); + assertThat(exc2.getMessage(), containsString(" has unsupported policy specified: ")); + + final Settings invalidSettings2 = Settings.builder().put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), "invalid").build(); + IllegalArgumentException exc3 = expectThrows( + IllegalArgumentException.class, + () -> IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.get(invalidSettings2) + ); + assertThat(exc3.getMessage(), containsString(" has unsupported policy specified: ")); + + IllegalArgumentException exc4 = expectThrows( + IllegalArgumentException.class, + () -> new IndexSettings(newIndexMeta("test", Settings.EMPTY), invalidSettings2).getMergePolicy(true) + ); + assertThat(exc4.getMessage(), containsString(" has unsupported policy specified: ")); + } + + public void testUpdateSettingsForLogByteSizeMergePolicy() throws IOException { + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + assertThat(indexSettings.getMergePolicy(true).getNoCFSRatio(), equalTo(0.1)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.9) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.9)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.1) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), 
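/*
 * The precedence these cases exercise, condensed into one sketch. Node-level settings would
 * normally come from opensearch.yml; the builders below are illustrative:
 *
 *   Settings nodeSettings = Settings.builder()
 *       .put("indices.time_series_index.default_index_merge_policy", "log_byte_size")
 *       .build();
 *   Settings indexSettings = Settings.builder()
 *       .put("index.merge.policy", "tiered")
 *       .build();
 *   // The index-scoped setting wins: even a time-series index resolves to the tiered policy.
 *   // The node-level time-series default applies only while index.merge.policy is "default".
 */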
equalTo(0.1)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.0) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.0)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), "true") + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(1.0)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), "false") + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.0)); } public void testTieredMergePolicySettingsUpdate() throws IOException { IndexSettings indexSettings = indexSettings(Settings.EMPTY); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d ); @@ -102,21 +267,21 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0 ); indexSettings.updateIndexMetadata( @@ -124,41 +289,41 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB) + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB) ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - 
MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE ); indexSettings.updateIndexMetadata( newIndexMeta( "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1 + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE - 1 ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1 + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE - 1 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001 ); indexSettings.updateIndexMetadata( @@ -166,21 +331,21 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1) + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1) ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER, 0 ); indexSettings.updateIndexMetadata( @@ -188,37 +353,37 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1 + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER + 1 ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER + 1, 0 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_DELETES_PCT_ALLOWED, 0 ); indexSettings.updateIndexMetadata( newIndexMeta( "index", - 
Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build() + Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build() ) ); - assertEquals(((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), 22, 0); + assertEquals(((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), 22, 0); IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, () -> indexSettings.updateIndexMetadata( newIndexMeta( "index", - Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build() + Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build() ) ) ); @@ -226,50 +391,162 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { assertThat(cause.getMessage(), containsString("must be <= 50.0")); indexSettings.updateIndexMetadata(newIndexMeta("index", EMPTY_SETTINGS)); // see if defaults are restored assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER, 0 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_DELETES_PCT_ALLOWED, 0 ); } + public void testLogByteSizeMergePolicySettingsUpdate() throws IOException { + + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMergeFactor(), + 
LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey(), + LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + 1 + ) + .build() + ) + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMergeFactor(), + LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + 1 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING.getKey(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE.getMb() + 1, ByteSizeUnit.MB) + ) + .build() + ) + ); + + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMinMergeMB(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING.getKey(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMb() + 100, ByteSizeUnit.MB) + ) + .build() + ) + ); + + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMb() + 100, ByteSizeUnit.MB).getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING.getKey(), + new ByteSizeValue( + LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE.getMb() - 100, + ByteSizeUnit.MB + ) + ) + .build() + ) + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeMBForForcedMerge(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE.getMb() - 100, ByteSizeUnit.MB) + .getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING.getKey(), 10000000) + .build() + ) + ); + assertEquals(((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeDocs(), 10000000); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.1) + .build() + ) + ); + assertEquals(indexSettings.getMergePolicy(true).getNoCFSRatio(), 0.1, 0.0); + } + public Settings build(String value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(double value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return 
Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(int value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(boolean value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } } diff --git a/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java b/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java index 2443ee1ab40be..baaf584702f78 100644 --- a/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java @@ -92,8 +92,8 @@ public void testUpdateAutoThrottleSettings() throws Exception { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "2") .put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), "true"); @@ -123,8 +123,8 @@ public void testUpdateMergeMaxThreadCount() throws Exception { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "10000") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10000"); IndexSettings settings = new IndexSettings(newIndexMeta("index", builder.build()), Settings.EMPTY); diff --git a/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java index 9c8f9896850c6..c88c86d51be08 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -58,7 +58,7 @@ import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.PersistedClusterStateService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.EngineConfigFactory; import org.opensearch.index.engine.EngineCreationFailureException; import org.opensearch.index.engine.InternalEngineFactory; @@ -134,7 +134,7 @@ public void setup() 
throws IOException { final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_INDEX_UUID, shardId.getIndex().getUUID()) .build(); diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index 11d916616578d..ad90255a3cc3f 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -54,7 +54,7 @@ import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.VersionType; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.Engine; @@ -168,7 +168,7 @@ public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10) // If soft-deletes is enabled, delete#1 will be reclaimed because its segment (segment_1) is fully deleted // index#0 will be retained if merge is disabled; otherwise it will be reclaimed because gcp=3 and retained_ops=0 - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .build(); try (ReplicationGroup shards = createGroup(1, settings)) { shards.startAll(); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 6e064f943ca07..0b80c6e577f95 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -130,9 +130,9 @@ import org.opensearch.http.HttpInfo; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; import org.opensearch.index.MergeSchedulerConfig; import org.opensearch.index.MockEngineFactoryPlugin; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.codec.CodecService; import org.opensearch.index.engine.Segment; import org.opensearch.index.mapper.CompletionFieldMapper; @@ -500,7 +500,7 @@ protected Settings.Builder setRandomIndexSettings(Random random, Settings.Builde private static Settings.Builder setRandomIndexMergeSettings(Random random, Settings.Builder builder) { if (random.nextBoolean()) { builder.put( - MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), (random.nextBoolean() ? random.nextDouble() : random.nextBoolean()).toString() ); } From d3bf230fe820f20121f3be35ca42b97cf804fea1 Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Mon, 2 Oct 2023 13:47:48 -0700 Subject: [PATCH 10/14] Simplify initialization of Settings (#10194) There was a weird circular class-loading dependency between Settings and Settings.Builder that could result in ClassLoader deadlock if multiple threads trigger class-loading of Settings at the same time. 
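To illustrate the failure mode, here is a minimal sketch with hypothetical Outer/Builder classes (illustrative names only, not the actual Settings code): each class's static initializer forces initialization of the other, so two threads that trigger class loading from opposite ends can block forever on the JVM's per-class initialization locks.

    // Hypothetical sketch of the circular static-initialization hazard.
    class Outer {
        // Initializing Outer forces initialization of Outer.Builder ...
        static final Outer EMPTY = new Builder().build();

        static class Builder {
            // ... and initializing Builder constructs an Outer, which requires
            // Outer's initialization to complete. If one thread enters each
            // class first, each waits on the lock the other holds: deadlock.
            static final Outer EMPTY_SETTINGS = new Builder().build();

            Outer build() {
                return new Outer();
            }
        }
    }

After this change the static references run in one direction only (Builder.EMPTY_SETTINGS aliases Settings.EMPTY), which breaks the cycle.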
Signed-off-by: Michael Froh --- CHANGELOG.md | 1 + .../main/java/org/opensearch/common/settings/Settings.java | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7519e242c0acf..46f34abf72641 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -129,6 +129,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) - Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) - Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) +- Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) ### Security diff --git a/server/src/main/java/org/opensearch/common/settings/Settings.java b/server/src/main/java/org/opensearch/common/settings/Settings.java index 91e39e38f0379..d202feb0786bf 100644 --- a/server/src/main/java/org/opensearch/common/settings/Settings.java +++ b/server/src/main/java/org/opensearch/common/settings/Settings.java @@ -101,7 +101,7 @@ @PublicApi(since = "1.0.0") public final class Settings implements ToXContentFragment { - public static final Settings EMPTY = new Builder().build(); + public static final Settings EMPTY = new Settings(Collections.emptyMap(), null); /** The raw settings from the full key to raw string value. */ private final Map settings; @@ -757,7 +757,7 @@ public Set keySet() { @PublicApi(since = "1.0.0") public static class Builder { - public static final Settings EMPTY_SETTINGS = new Builder().build(); + public static final Settings EMPTY_SETTINGS = Settings.EMPTY; // we use a sorted map for consistent serialization when using getAsMap() private final Map map = new TreeMap<>(); From 9d0db5ef413ebc9f2eb7d8721ef74be273908094 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Romain=20Tarti=C3=A8re?= Date: Mon, 2 Oct 2023 10:58:24 -1000 Subject: [PATCH 11/14] Remove spurious SGID (#9447) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Setting the SGID bit on directories may be something some users want, but setting it by default for everyone does not really make sense, and when packaging OpenSearch we need to remove this customization when building packages. This was added to Elasticsearch to make it possible to manage the keystore as root while the service runs as an unprivileged user: without the SGID trick, the generated keystore was owned by root and Elasticsearch could not access it. It is preferable to manage the keystore with non-root privileges, in which case this hack is not required. Stick to the default permissions and remove this customization.
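For reference, mode 02750 is 0750 plus the setgid bit: files created inside such a directory inherit its group, which is what kept a root-created keystore accessible to the opensearch group. Below is a small hypothetical Java sketch of inspecting a directory mode; the ModeCheck name and the path argument are assumptions for illustration, and it assumes GNU findutils. java.nio's POSIX permission view cannot represent special-mode bits, which is why the packaging tests shell out instead.

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.attribute.PosixFilePermissions;

    // Hypothetical helper, not part of the packaging tests.
    public class ModeCheck {
        public static void main(String[] args) throws Exception {
            Path dir = Path.of(args[0]);
            // The POSIX view covers only the nine rwx bits, so a setgid bit on
            // the directory is invisible here (prints e.g. "rwxr-x---").
            System.out.println(PosixFilePermissions.toString(Files.getPosixFilePermissions(dir)));
            // GNU find prints the full octal mode: "2750" with the setgid bit,
            // "750" without it.
            new ProcessBuilder("find", dir.toString(), "-maxdepth", "0", "-printf", "%m")
                .inheritIO()
                .start()
                .waitFor();
        }
    }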
Signed-off-by: Romain Tartière --- CHANGELOG.md | 1 + distribution/packages/build.gradle | 8 ++++---- distribution/packages/src/deb/lintian/opensearch | 8 ++++---- .../test/java/org/opensearch/packaging/util/Packages.java | 4 ++-- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 46f34abf72641..c8e4cc57593a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -123,6 +123,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Deprecated ### Removed +- Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447)) ### Fixed - Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725)) diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 7914fcc172ef4..262ad6c802bbb 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -213,7 +213,7 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { configurationFile '/etc/opensearch/jvm.options' configurationFile '/etc/opensearch/log4j2.properties' from("${packagingFiles}") { - dirMode 02750 + dirMode 0750 into('/etc') permissionGroup 'opensearch' includeEmptyDirs true @@ -223,7 +223,7 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { } from("${packagingFiles}/etc/opensearch") { into('/etc/opensearch') - dirMode 02750 + dirMode 0750 fileMode 0660 permissionGroup 'opensearch' includeEmptyDirs true @@ -281,8 +281,8 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { dirMode mode } } - copyEmptyDir('/var/log/opensearch', 'opensearch', 'opensearch', 02750) - copyEmptyDir('/var/lib/opensearch', 'opensearch', 'opensearch', 02750) + copyEmptyDir('/var/log/opensearch', 'opensearch', 'opensearch', 0750) + copyEmptyDir('/var/lib/opensearch', 'opensearch', 'opensearch', 0750) copyEmptyDir('/usr/share/opensearch/plugins', 'root', 'root', 0755) into '/usr/share/opensearch' diff --git a/distribution/packages/src/deb/lintian/opensearch b/distribution/packages/src/deb/lintian/opensearch index 854b23131ecbc..e6db8e8c6b322 100644 --- a/distribution/packages/src/deb/lintian/opensearch +++ b/distribution/packages/src/deb/lintian/opensearch @@ -15,11 +15,11 @@ missing-dep-on-jarwrapper # we prefer to not make our config and log files world readable non-standard-file-perm etc/default/opensearch 0660 != 0644 -non-standard-dir-perm etc/opensearch/ 2750 != 0755 -non-standard-dir-perm etc/opensearch/jvm.options.d/ 2750 != 0755 +non-standard-dir-perm etc/opensearch/ 0750 != 0755 +non-standard-dir-perm etc/opensearch/jvm.options.d/ 0750 != 0755 non-standard-file-perm etc/opensearch/* -non-standard-dir-perm var/lib/opensearch/ 2750 != 0755 -non-standard-dir-perm var/log/opensearch/ 2750 != 0755 +non-standard-dir-perm var/lib/opensearch/ 0750 != 0755 +non-standard-dir-perm var/log/opensearch/ 0750 != 0755 executable-is-not-world-readable etc/init.d/opensearch 0750 non-standard-file-permissions-for-etc-init.d-script etc/init.d/opensearch 0750 != 0755 diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java index b80ae422bda9a..e9ebf28042b46 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java @@ -194,11 +194,11 @@ private 
static void verifyInstallation(Installation opensearch, Distribution dis // we shell out here because java's posix file permission view doesn't support special modes assertThat(opensearch.config, file(Directory, "root", "opensearch", p750)); - assertThat(sh.run("find \"" + opensearch.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750")); + assertThat(sh.run("find \"" + opensearch.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("750")); final Path jvmOptionsDirectory = opensearch.config.resolve("jvm.options.d"); assertThat(jvmOptionsDirectory, file(Directory, "root", "opensearch", p750)); - assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750")); + assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("750")); Stream.of("opensearch.keystore", "opensearch.yml", "jvm.options", "log4j2.properties") .forEach(configFile -> assertThat(opensearch.config(configFile), file(File, "root", "opensearch", p660))); From 1d66af346f7863bbca44a3102a1af2fed5fd3804 Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Mon, 2 Oct 2023 17:51:47 -0400 Subject: [PATCH 12/14] Fix: register multiple extensions. (#10256) * Fix: register multiple extensions. Signed-off-by: dblock * Updated CHANGELOG. Signed-off-by: dblock * Added tests. Signed-off-by: dblock --------- Signed-off-by: dblock --- CHANGELOG.md | 1 + .../extensions/ExtensionsManager.java | 21 ++- .../rest/RestActionsRequestHandler.java | 3 + .../rest/RestInitializeExtensionAction.java | 3 +- .../rest/RestSendToExtensionAction.java | 2 +- .../extensions/ExtensionsManagerTests.java | 145 +++++++++++++++++- .../RestInitializeExtensionActionTests.java | 19 +-- .../rest/RestSendToExtensionActionTests.java | 6 +- 8 files changed, 176 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c8e4cc57593a4..fb37533d4c834 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -131,6 +131,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) - Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) - Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) +- Fix registration and initialization of multiple extensions ([10256](https://github.com/opensearch-project/OpenSearch/pull/10256)) ### Security diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java index 9f9ba548143c6..b531abcb845d7 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java @@ -300,7 +300,7 @@ private void registerRequestHandler(DynamicActionRegistry dynamicActionRegistry) * Loads a single extension * @param extension The extension to be loaded */ - public void loadExtension(Extension extension) throws IOException { + public DiscoveryExtensionNode loadExtension(Extension extension) throws IOException { validateExtension(extension); DiscoveryExtensionNode discoveryExtensionNode = new DiscoveryExtensionNode( extension.getName(), @@ -314,6 +314,12 @@ public void loadExtension(Extension
extension) throws IOException { extensionIdMap.put(extension.getUniqueId(), discoveryExtensionNode); extensionSettingsMap.put(extension.getUniqueId(), extension); logger.info("Loaded extension with uniqueId " + extension.getUniqueId() + ": " + extension); + return discoveryExtensionNode; + } + + public void initializeExtension(Extension extension) throws IOException { + DiscoveryExtensionNode node = loadExtension(extension); + initializeExtensionNode(node); } private void validateField(String fieldName, String value) throws IOException { @@ -340,11 +346,11 @@ private void validateExtension(Extension extension) throws IOException { */ public void initialize() { for (DiscoveryExtensionNode extension : extensionIdMap.values()) { - initializeExtension(extension); + initializeExtensionNode(extension); } } - private void initializeExtension(DiscoveryExtensionNode extension) { + public void initializeExtensionNode(DiscoveryExtensionNode extensionNode) { final CompletableFuture inProgressFuture = new CompletableFuture<>(); final TransportResponseHandler initializeExtensionResponseHandler = new TransportResponseHandler< @@ -384,7 +390,8 @@ public String executor() { transportService.getThreadPool().generic().execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { - extensionIdMap.remove(extension.getId()); + logger.warn("Error registering extension: " + extensionNode.getId(), e); + extensionIdMap.remove(extensionNode.getId()); if (e.getCause() instanceof ConnectTransportException) { logger.info("No response from extension to request.", e); throw (ConnectTransportException) e.getCause(); @@ -399,11 +406,11 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { - transportService.connectToExtensionNode(extension); + transportService.connectToExtensionNode(extensionNode); transportService.sendRequest( - extension, + extensionNode, REQUEST_EXTENSION_ACTION_NAME, - new InitializeExtensionRequest(transportService.getLocalNode(), extension, issueServiceAccount(extension)), + new InitializeExtensionRequest(transportService.getLocalNode(), extensionNode, issueServiceAccount(extensionNode)), initializeExtensionResponseHandler ); } diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java index 97851cbd394a0..383796f0c3b44 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java @@ -62,6 +62,9 @@ public TransportResponse handleRegisterRestActionsRequest( DynamicActionRegistry dynamicActionRegistry ) throws Exception { DiscoveryExtensionNode discoveryExtensionNode = extensionIdMap.get(restActionsRequest.getUniqueId()); + if (discoveryExtensionNode == null) { + throw new IllegalStateException("Missing extension node for " + restActionsRequest.getUniqueId()); + } RestHandler handler = new RestSendToExtensionAction( restActionsRequest, discoveryExtensionNode, diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java index 4b622b841a040..fc7c21a6eccd6 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java @@ -159,8 +159,7 @@ public 
RestChannelConsumer prepareRequest(RestRequest request, NodeClient client extAdditionalSettings ); try { - extensionsManager.loadExtension(extension); - extensionsManager.initialize(); + extensionsManager.initializeExtension(extension); } catch (CompletionException e) { Throwable cause = e.getCause(); if (cause instanceof TimeoutException) { diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java index 33f44a913dd8a..41783b89ccc69 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java @@ -150,7 +150,7 @@ public RestSendToExtensionAction( @Override public String getName() { - return SEND_TO_EXTENSION_ACTION; + return this.discoveryExtensionNode.getId() + ":" + SEND_TO_EXTENSION_ACTION; } @Override diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java index f243a924f4e63..c61afdd5c5261 100644 --- a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java +++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java @@ -36,6 +36,7 @@ import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.discovery.InitializeExtensionRequest; import org.opensearch.env.Environment; import org.opensearch.env.EnvironmentSettingsResponse; import org.opensearch.extensions.ExtensionsSettings.Extension; @@ -77,6 +78,7 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -409,19 +411,94 @@ public void testInitialize() throws Exception { ) ); - // Test needs to be changed to mock the connection between the local node and an extension. Assert statment is commented out for - // now. + // Test needs to be changed to mock the connection between the local node and an extension. 
// Link to issue: https://github.com/opensearch-project/OpenSearch/issues/4045 // mockLogAppender.assertAllExpectationsMatched(); } } + public void testInitializeExtension() throws Exception { + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); + + TransportService mockTransportService = spy( + new TransportService( + Settings.EMPTY, + mock(Transport.class), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, + null, + Collections.emptySet(), + NoopTracer.INSTANCE + ) + ); + + doNothing().when(mockTransportService).connectToExtensionNode(any(DiscoveryExtensionNode.class)); + + doNothing().when(mockTransportService) + .sendRequest(any(DiscoveryExtensionNode.class), anyString(), any(InitializeExtensionRequest.class), any()); + + extensionsManager.initializeServicesAndRestHandler( + actionModule, + settingsModule, + mockTransportService, + clusterService, + settings, + client, + identityService + ); + + Extension firstExtension = new Extension( + "firstExtension", + "uniqueid1", + "127.0.0.0", + "9301", + "0.0.7", + "2.0.0", + "2.0.0", + List.of(), + null + ); + + extensionsManager.initializeExtension(firstExtension); + + Extension secondExtension = new Extension( + "secondExtension", + "uniqueid2", + "127.0.0.0", + "9301", + "0.0.7", + "2.0.0", + "2.0.0", + List.of(), + null + ); + + extensionsManager.initializeExtension(secondExtension); + + ThreadPool.terminate(threadPool, 3, TimeUnit.SECONDS); + + verify(mockTransportService, times(2)).connectToExtensionNode(any(DiscoveryExtensionNode.class)); + + verify(mockTransportService, times(2)).sendRequest( + any(DiscoveryExtensionNode.class), + anyString(), + any(InitializeExtensionRequest.class), + any() + ); + } + public void testHandleRegisterRestActionsRequest() throws Exception { ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); + List actionsList = List.of("GET /foo foo", "PUT /bar bar", "POST /baz baz"); List deprecatedActionsList = List.of("GET /deprecated/foo foo_deprecated", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -431,6 +508,58 @@ public void testHandleRegisterRestActionsRequest() throws Exception { assertTrue(((AcknowledgedResponse) response).getStatus()); } + public void testHandleRegisterRestActionsRequestRequiresDiscoveryNode() throws Exception { + + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); + initialize(extensionsManager); + + RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest("uniqueId1", List.of(), List.of()); + + expectThrows( + IllegalStateException.class, + () -> extensionsManager.getRestActionsRequestHandler() + .handleRegisterRestActionsRequest(registerActionsRequest, actionModule.getDynamicActionRegistry()) + ); + } + + public void testHandleRegisterRestActionsRequestMultiple() throws Exception { + + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); + initialize(extensionsManager); + + List actionsList = List.of("GET /foo foo", "PUT /bar bar", "POST /baz baz"); + List deprecatedActionsList = List.of("GET /deprecated/foo foo_deprecated", "It's deprecated!"); + for (int i = 0; i < 2; 
i++) { + String uniqueIdStr = "uniqueid-%d" + i; + + Set> additionalSettings = extAwarePlugin.getExtensionSettings().stream().collect(Collectors.toSet()); + ExtensionScopedSettings extensionScopedSettings = new ExtensionScopedSettings(additionalSettings); + Extension firstExtension = new Extension( + "Extension %s" + i, + uniqueIdStr, + "127.0.0.0", + "9300", + "0.0.7", + "3.0.0", + "3.0.0", + List.of(), + extensionScopedSettings + ); + + extensionsManager.loadExtension(firstExtension); + + RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest( + uniqueIdStr, + actionsList, + deprecatedActionsList + ); + TransportResponse response = extensionsManager.getRestActionsRequestHandler() + .handleRegisterRestActionsRequest(registerActionsRequest, actionModule.getDynamicActionRegistry()); + assertEquals(AcknowledgedResponse.class, response.getClass()); + assertTrue(((AcknowledgedResponse) response).getStatus()); + } + } + public void testHandleRegisterSettingsRequest() throws Exception { ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); @@ -452,6 +581,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidMethod() throws Excep initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("FOO /foo", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("GET /deprecated/foo", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -467,6 +599,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedMethod() th initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("GET /foo", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("FOO /deprecated/foo", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -481,6 +616,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidUri() throws Exceptio ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("GET", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("GET /deprecated/foo", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -495,6 +633,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedUri() throw ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("GET /foo", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("GET", "It's deprecated!"); 
RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java index cdddf8e9be1be..e237214ab88f5 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java @@ -19,8 +19,9 @@ import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.extensions.DiscoveryExtensionNode; import org.opensearch.extensions.ExtensionsManager; -import org.opensearch.extensions.ExtensionsSettings; +import org.opensearch.extensions.ExtensionsSettings.Extension; import org.opensearch.identity.IdentityService; import org.opensearch.rest.RestRequest; import org.opensearch.telemetry.tracing.noop.NoopTracer; @@ -160,8 +161,8 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettings() th // optionally, you can stub out some methods: when(spy.getAdditionalSettings()).thenCallRealMethod(); - Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); - Mockito.doNothing().when(spy).initialize(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(Extension.class)); + Mockito.doNothing().when(spy).initializeExtensionNode(any(DiscoveryExtensionNode.class)); RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" @@ -177,10 +178,10 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettings() th FakeRestChannel channel = new FakeRestChannel(request, false, 0); restInitializeExtensionAction.handleRequest(request, channel, null); - assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertEquals(RestStatus.ACCEPTED, channel.capturedResponse().status()); assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); - Optional extension = spy.lookupExtensionSettingsById("ad-extension"); + Optional extension = spy.lookupExtensionSettingsById("ad-extension"); assertTrue(extension.isPresent()); assertEquals(true, extension.get().getAdditionalSettings().get(boolSetting)); assertEquals("customSetting", extension.get().getAdditionalSettings().get(stringSetting)); @@ -210,8 +211,8 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettingsUsing // optionally, you can stub out some methods: when(spy.getAdditionalSettings()).thenCallRealMethod(); - Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); - Mockito.doNothing().when(spy).initialize(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(Extension.class)); + Mockito.doNothing().when(spy).initializeExtensionNode(any(DiscoveryExtensionNode.class)); RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + 
"\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" @@ -227,10 +228,10 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettingsUsing FakeRestChannel channel = new FakeRestChannel(request, false, 0); restInitializeExtensionAction.handleRequest(request, channel, null); - assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertEquals(RestStatus.ACCEPTED, channel.capturedResponse().status()); assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); - Optional extension = spy.lookupExtensionSettingsById("ad-extension"); + Optional extension = spy.lookupExtensionSettingsById("ad-extension"); assertTrue(extension.isPresent()); assertEquals(false, extension.get().getAdditionalSettings().get(boolSetting)); assertEquals("default", extension.get().getAdditionalSettings().get(stringSetting)); diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java index ee36ea170e270..fe738ff7d85e6 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java @@ -150,7 +150,7 @@ public void testRestSendToExtensionAction() throws Exception { identityService ); - assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName()); List expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; expected.add(new Route(Method.GET, uriPrefix + "/foo")); @@ -183,7 +183,7 @@ public void testRestSendToExtensionActionWithNamedRoute() throws Exception { identityService ); - assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName()); List expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; NamedRoute nr1 = new NamedRoute.Builder().method(Method.GET).path(uriPrefix + "/foo").uniqueName("foo").build(); @@ -229,7 +229,7 @@ public void testRestSendToExtensionActionWithNamedRouteAndLegacyActionName() thr identityService ); - assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName()); List expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; NamedRoute nr1 = new NamedRoute.Builder().method(Method.GET) From beb25b1957321234aa3642977b0f7144318c8d8a Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 2 Oct 2023 18:21:58 -0400 Subject: [PATCH 13/14] Bump netty from 4.1.97.Final to 4.1.99.Final (#10306) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- .../licenses/netty-buffer-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.99.Final.jar.sha1 | 1 + .../transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 | 1 - .../transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.99.Final.jar.sha1 | 1 + 
.../licenses/netty-handler-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.99.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.97.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.97.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.99.Final.jar.sha1 | 1 + .../repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 | 1 - .../repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.99.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.99.Final.jar.sha1 | 1 + .../netty-transport-classes-epoll-4.1.97.Final.jar.sha1 | 1 - .../netty-transport-classes-epoll-4.1.99.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.97.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.99.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.99.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.99.Final.jar.sha1 | 1 + 68 files changed, 35 
insertions(+), 34 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 delete mode 100644 
plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index fb37533d4c834..9b4e6408f3491 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -110,6 +110,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` - Bump Lucene from 9.7.0 to 9.8.0 ([#10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) - Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) +- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) ### Changed - Add instrumentation in rest and network layer. 
([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 243a1b2c6f57e..e54a5a1089a93 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -28,7 +28,7 @@ jakarta_annotation = 1.3.5 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.97.Final +netty = 4.1.99.Final joda = 2.12.2 # client dependencies diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8430355365996..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..5b393be40e945 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9f02dcb9b15a647a56af210dffdc294a57922fb0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 deleted file mode 100644 index 7a36dc1f2724f..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -384ba4d75670befbedb45c4d3b497a93639c206d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..45ea27d29a183 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9984cbd6e5d55c768f198e975d8aaf7fd42a4602 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1 deleted file mode 100644 index 37b78a32f741f..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af78acec783ffd77c63d8aeecc21041fd39ac54f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6bb7fcd68b272 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +7142095066eaebd5f29b88c41af7b383b6a953f6 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 deleted file mode 100644 index cbf685a6d79d3..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -893888d09a7bef0d0ba973d7471943e765d0fd08 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..f9bdefc6dd965 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ 
+c5a3481c4bb9732a3a94fb63cf916141a1a14669 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index 1bdfec3aae6ba..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7cceacaf11df8dc63f23d0fb58e9d4640fc88404 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..d53adfa649f5f --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +278f6dfa49d6bd75c40ae1470eb165716f87dce0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8b7b50a6fc9c6..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..258f7c957dda0 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +742693761d7ea4c038bccfda96bb38194720b80d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1 deleted file mode 100644 index 032959e98d009..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cec8348108dc76c47cf87c669d514be52c922144 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..b8bc0a4370f58 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +080e45397d9d5b134477de3ffd0f94283b908621 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1 deleted file mode 100644 index 107863c1b3c9d..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f37380d23c9bb079bc702910833b2fd532c9abd0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..247975e0a64c7 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9ca2e3ae19a6713b749df154622115f480b6716c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index f736d37d071b7..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 
+0,0 @@ -d469d84265ab70095b01b40886cabdd433b6e664 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6b7b66ea768e3 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +cb0fc6c31c387404212949c57950b5d72ce908b9 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1 deleted file mode 100644 index f592ac8312a5d..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d266d079ef33cf93a16b382d64dd15d562df1159 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6c1112ed49775 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +21c76a42a468faafac6c84f8aca775073fc8e345 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 deleted file mode 100644 index cbf685a6d79d3..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -893888d09a7bef0d0ba973d7471943e765d0fd08 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..f9bdefc6dd965 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +c5a3481c4bb9732a3a94fb63cf916141a1a14669 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1 deleted file mode 100644 index d06147a0ba646..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30e8fa29a349db5a933225d61891b8802836bb79 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..717703c36e1ab --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +259bf1c5178c3e23bb89a2fab59b6d22846e3fa6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 deleted file mode 100644 index 67c3a763d26fa..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a99ecef0e1d86a92e40a7c89805c236d9cd7493e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..3f69ae54c5d4a --- /dev/null 
+++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +8c8a89ea89b06e120c57bdb3db14b9a47ca30bb3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 deleted file mode 100644 index 60fd706436ae7..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2c50f835777ecd4535e15b552b5d9ccb26a2504f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..adef44a4e7da7 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +804d8b752847923d3bb81f24de604597047c9b2e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index f736d37d071b7..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d469d84265ab70095b01b40886cabdd433b6e664 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6b7b66ea768e3 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +cb0fc6c31c387404212949c57950b5d72ce908b9 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 deleted file mode 100644 index c6fa4cc175222..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -afec3c414a0ab7264a66a7572e9e9d3a19a3e0e5 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..0756635018837 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +a45aa70bc50d0500da5cdcd595cc838d87ada987 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8430355365996..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..5b393be40e945 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9f02dcb9b15a647a56af210dffdc294a57922fb0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 deleted file mode 100644 index 
7a36dc1f2724f..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -384ba4d75670befbedb45c4d3b497a93639c206d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..45ea27d29a183 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9984cbd6e5d55c768f198e975d8aaf7fd42a4602 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1 deleted file mode 100644 index 37b78a32f741f..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af78acec783ffd77c63d8aeecc21041fd39ac54f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6bb7fcd68b272 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +7142095066eaebd5f29b88c41af7b383b6a953f6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 deleted file mode 100644 index cbf685a6d79d3..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -893888d09a7bef0d0ba973d7471943e765d0fd08 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..f9bdefc6dd965 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +c5a3481c4bb9732a3a94fb63cf916141a1a14669 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index 1bdfec3aae6ba..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7cceacaf11df8dc63f23d0fb58e9d4640fc88404 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..d53adfa649f5f --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +278f6dfa49d6bd75c40ae1470eb165716f87dce0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8b7b50a6fc9c6..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..258f7c957dda0 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 @@ -0,0 +1 
@@ +742693761d7ea4c038bccfda96bb38194720b80d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 deleted file mode 100644 index 032959e98d009..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cec8348108dc76c47cf87c669d514be52c922144 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..b8bc0a4370f58 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +080e45397d9d5b134477de3ffd0f94283b908621 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1 deleted file mode 100644 index 107863c1b3c9d..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f37380d23c9bb079bc702910833b2fd532c9abd0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..247975e0a64c7 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9ca2e3ae19a6713b749df154622115f480b6716c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8e40c8826d76d..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -795da37ded759e862457a82d9d92c4d39ce8ecee \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..75b64ad4197d8 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +961bd5b8d97ea6a07168176462f398089a24b5c8 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index f736d37d071b7..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d469d84265ab70095b01b40886cabdd433b6e664 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6b7b66ea768e3 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +cb0fc6c31c387404212949c57950b5d72ce908b9 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8430355365996..0000000000000 --- 
a/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..5b393be40e945 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9f02dcb9b15a647a56af210dffdc294a57922fb0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 deleted file mode 100644 index 7a36dc1f2724f..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -384ba4d75670befbedb45c4d3b497a93639c206d \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..45ea27d29a183 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9984cbd6e5d55c768f198e975d8aaf7fd42a4602 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 deleted file mode 100644 index 37b78a32f741f..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af78acec783ffd77c63d8aeecc21041fd39ac54f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6bb7fcd68b272 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +7142095066eaebd5f29b88c41af7b383b6a953f6 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index 1bdfec3aae6ba..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7cceacaf11df8dc63f23d0fb58e9d4640fc88404 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..d53adfa649f5f --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +278f6dfa49d6bd75c40ae1470eb165716f87dce0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8b7b50a6fc9c6..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..258f7c957dda0 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +742693761d7ea4c038bccfda96bb38194720b80d \ No newline at end 
of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1
deleted file mode 100644
index 032959e98d009..0000000000000
--- a/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cec8348108dc76c47cf87c669d514be52c922144
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1
new file mode 100644
index 0000000000000..b8bc0a4370f58
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1
@@ -0,0 +1 @@
+080e45397d9d5b134477de3ffd0f94283b908621
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1
deleted file mode 100644
index 107863c1b3c9d..0000000000000
--- a/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f37380d23c9bb079bc702910833b2fd532c9abd0
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1
new file mode 100644
index 0000000000000..247975e0a64c7
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1
@@ -0,0 +1 @@
+9ca2e3ae19a6713b749df154622115f480b6716c
\ No newline at end of file

From 3a790c150e6a8b45d385341e276da6f226489515 Mon Sep 17 00:00:00 2001
From: Jay Deng
Date: Mon, 2 Oct 2023 22:37:17 -0700
Subject: [PATCH 14/14] Disable concurrent search when terminate_after is used (#10200)

Signed-off-by: Jay Deng
---
 CHANGELOG.md | 1 +
 .../simple/ParameterizedSimpleSearchIT.java | 608 -----------------
 .../search/simple/SimpleSearchIT.java | 645 +++++++++++++++++-
 .../search/DefaultSearchContext.java | 2 +
 4 files changed, 645 insertions(+), 611 deletions(-)
 delete mode 100644 server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java
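A note on the mechanics, since the DefaultSearchContext hunk counted above is only two lines and is not reproduced in this excerpt: terminate_after caps the number of hits collected per shard, so letting concurrent segment-search slices each terminate independently could over- or under-collect relative to the requested bound. A minimal, hypothetical sketch of the kind of guard this implies is below; the class and member names are illustrative assumptions, not the actual OpenSearch code.

    // Hypothetical sketch only; field and method names are assumptions,
    // not the real DefaultSearchContext members.
    class ConcurrentSearchDecider {
        private final boolean concurrentSegmentSearchEnabled; // cluster/index setting
        private final int terminateAfter; // 0 is assumed to mean "not set"

        ConcurrentSearchDecider(boolean concurrentSegmentSearchEnabled, int terminateAfter) {
            this.concurrentSegmentSearchEnabled = concurrentSegmentSearchEnabled;
            this.terminateAfter = terminateAfter;
        }

        boolean shouldUseConcurrentSearch() {
            // terminate_after is accounted for the shard as a whole; concurrent
            // slices would each terminate independently, so force the sequential
            // path whenever the parameter is set.
            return concurrentSegmentSearchEnabled && terminateAfter == 0;
        }
    }

The test changes below exercise exactly this interaction: SimpleSearchIT becomes parameterized so every terminate_after scenario runs with the cluster concurrent-segment-search setting both enabled and disabled, and expects identical results.

diff --git a/CHANGELOG.md b/CHANGELOG.md
index fb37533d4c834..9963eaef31d33 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -120,6 +120,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042))
 - [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122))
 - Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246))
+- Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200))
 ### Deprecated
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java
deleted file mode 100644
index 719b75079da92..0000000000000
--- a/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java
+++ /dev/null
@@ -1,608 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.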
- */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.search.simple; - -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - -import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.action.search.SearchPhaseExecutionException; -import org.opensearch.action.search.SearchRequestBuilder; -import org.opensearch.action.search.SearchResponse; -import org.opensearch.action.support.WriteRequest.RefreshPolicy; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.core.xcontent.MediaTypeRegistry; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.query.QueryBuilders; -import org.opensearch.index.query.TermQueryBuilder; -import org.opensearch.search.rescore.QueryRescorerBuilder; -import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.ExecutionException; - -import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static org.opensearch.index.query.QueryBuilders.boolQuery; -import static org.opensearch.index.query.QueryBuilders.matchAllQuery; -import static org.opensearch.index.query.QueryBuilders.queryStringQuery; -import static org.opensearch.index.query.QueryBuilders.rangeQuery; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; - -public class ParameterizedSimpleSearchIT extends ParameterizedOpenSearchIntegTestCase { - - public ParameterizedSimpleSearchIT(Settings settings) { - super(settings); - } - - @ParametersFactory - public static Collection parameters() { - return Arrays.asList( - new 
Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } - ); - } - - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - - public void testSearchNullIndex() { - expectThrows( - NullPointerException.class, - () -> client().prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() - ); - - expectThrows( - NullPointerException.class, - () -> client().prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() - ); - - } - - public void testSearchRandomPreference() throws InterruptedException, ExecutionException { - createIndex("test"); - indexRandom( - true, - client().prepareIndex("test").setId("1").setSource("field", "value"), - client().prepareIndex("test").setId("2").setSource("field", "value"), - client().prepareIndex("test").setId("3").setSource("field", "value"), - client().prepareIndex("test").setId("4").setSource("field", "value"), - client().prepareIndex("test").setId("5").setSource("field", "value"), - client().prepareIndex("test").setId("6").setSource("field", "value") - ); - - int iters = scaledRandomIntBetween(10, 20); - for (int i = 0; i < iters; i++) { - String randomPreference = randomUnicodeOfLengthBetween(0, 4); - // randomPreference should not start with '_' (reserved for known preference types (e.g. _shards, _primary) - while (randomPreference.startsWith("_")) { - randomPreference = randomUnicodeOfLengthBetween(0, 4); - } - // id is not indexed, but lets see that we automatically convert to - SearchResponse searchResponse = client().prepareSearch() - .setQuery(QueryBuilders.matchAllQuery()) - .setPreference(randomPreference) - .get(); - assertHitCount(searchResponse, 6L); - - } - } - - public void testSimpleIp() throws Exception { - createIndex("test"); - - client().admin() - .indices() - .preparePutMapping("test") - .setSource( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("from") - .field("type", "ip") - .endObject() - .startObject("to") - .field("type", "ip") - .endObject() - .endObject() - .endObject() - .endObject() - ) - .get(); - - client().prepareIndex("test").setId("1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefreshPolicy(IMMEDIATE).get(); - - SearchResponse search = client().prepareSearch() - .setQuery(boolQuery().must(rangeQuery("from").lte("192.168.0.7")).must(rangeQuery("to").gte("192.168.0.7"))) - .get(); - - assertHitCount(search, 1L); - } - - public void testIpCidr() throws Exception { - createIndex("test"); - - client().admin() - .indices() - .preparePutMapping("test") - .setSource( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("ip") - .field("type", "ip") - .endObject() - .endObject() - .endObject() - .endObject() - ) - .get(); - ensureGreen(); - - client().prepareIndex("test").setId("1").setSource("ip", "192.168.0.1").get(); - client().prepareIndex("test").setId("2").setSource("ip", "192.168.0.2").get(); - client().prepareIndex("test").setId("3").setSource("ip", "192.168.0.3").get(); - client().prepareIndex("test").setId("4").setSource("ip", "192.168.1.4").get(); - 
client().prepareIndex("test").setId("5").setSource("ip", "2001:db8::ff00:42:8329").get(); - refresh(); - - SearchResponse search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(queryStringQuery("ip: 192.168.0.1")).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1/32"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.0/24"))).get(); - assertHitCount(search, 3L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.0.0.0/8"))).get(); - assertHitCount(search, 4L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0.0.0.0/0"))).get(); - assertHitCount(search, 4L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::ff00:42:8329/128"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::/64"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "::/0"))).get(); - assertHitCount(search, 5L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.1.5/32"))).get(); - assertHitCount(search, 0L); - - assertFailures( - client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0/0/0/0/0"))), - RestStatus.BAD_REQUEST, - containsString("Expected [ip/prefix] but was [0/0/0/0/0]") - ); - } - - public void testSimpleId() { - createIndex("test"); - - client().prepareIndex("test").setId("XXX1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - // id is not indexed, but lets see that we automatically convert to - SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")).get(); - assertHitCount(searchResponse, 1L); - - searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")).get(); - assertHitCount(searchResponse, 1L); - } - - public void testSimpleDateRange() throws Exception { - createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field", "2010-01-05T02:00").get(); - client().prepareIndex("test").setId("2").setSource("field", "2010-01-06T02:00").get(); - ensureGreen(); - refresh(); - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d/d")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lte("2010-01-06T02:00")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lt("2010-01-06T02:00")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gt("2010-01-05T02:00").lt("2010-01-06T02:00")) - .get(); - assertNoFailures(searchResponse); - 
assertHitCount(searchResponse, 0L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.queryStringQuery("field:[2010-01-03||+2d TO 2010-01-04||+2d/d]")) - .get(); - assertHitCount(searchResponse, 2L); - } - - // TODO: combine this test with SimpleSearchIT.testSimpleTerminateAfterCount after - // https://github.com/opensearch-project/OpenSearch/issues/8371 - public void testSimpleTerminateAfterCountWithSizeAndTrackHits() throws Exception { - prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); - ensureGreen(); - int numDocs = randomIntBetween(15, 29); - List docbuilders = new ArrayList<>(numDocs); - - for (int i = 1; i <= numDocs; i++) { - String id = String.valueOf(i); - docbuilders.add(client().prepareIndex("test").setId(id).setSource("field", i)); - } - - indexRandom(true, docbuilders); - ensureGreen(); - refresh(); - - SearchResponse searchResponse; - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.matchAllQuery()) - .setTerminateAfter(numDocs) - .setSize(0) - .setTrackTotalHits(true) - .get(); - assertEquals(0, searchResponse.getFailedShards()); - } - - public void testSimpleIndexSortEarlyTerminate() throws Exception { - prepareCreate("test").setSettings( - Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).put("index.sort.field", "rank") - ).setMapping("rank", "type=integer").get(); - ensureGreen(); - int max = randomIntBetween(3, 29); - List docbuilders = new ArrayList<>(max); - - for (int i = max - 1; i >= 0; i--) { - String id = String.valueOf(i); - docbuilders.add(client().prepareIndex("test").setId(id).setSource("rank", i)); - } - - indexRandom(true, docbuilders); - ensureGreen(); - refresh(); - - SearchResponse searchResponse; - for (int i = 1; i < max; i++) { - searchResponse = client().prepareSearch("test") - .addDocValueField("rank") - .setTrackTotalHits(false) - .addSort("rank", SortOrder.ASC) - .setSize(i) - .get(); - assertNull(searchResponse.getHits().getTotalHits()); - for (int j = 0; j < i; j++) { - assertThat(searchResponse.getHits().getAt(j).field("rank").getValue(), equalTo((long) j)); - } - } - } - - public void testInsaneFromAndSize() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertWindowFails(client().prepareSearch("idx").setFrom(Integer.MAX_VALUE)); - assertWindowFails(client().prepareSearch("idx").setSize(Integer.MAX_VALUE)); - } - - public void testTooLargeFromAndSize() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertWindowFails(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))); - assertWindowFails(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1)); - assertWindowFails( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - ); - } - - public void testLargeFromAndSizeSucceeds() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - 10).get(), 1); - 
assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2 - 1) - .get(), - 1 - ); - } - - public void testTooLargeFromAndSizeOkBySetting() throws Exception { - prepareCreate("idx").setSettings( - Settings.builder() - .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2) - ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .get(), - 1 - ); - } - - public void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception { - createIndex("idx"); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings("idx") - .setSettings( - Settings.builder() - .put( - IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), - IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2 - ) - ) - .get() - ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .get(), - 1 - ); - } - - public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws Exception { - prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) - .get(), - 1 - ); - } - - public void testTooLargeRescoreWindow() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertRescoreWindowFails(Integer.MAX_VALUE); - assertRescoreWindowFails(IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + 1); - } - - public void testTooLargeRescoreOkBySetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) - .get(); - 
indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testTooLargeRescoreOkByResultWindowSetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - prepareCreate("idx").setSettings( - Settings.builder() - .put( - IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), // Note that this is the RESULT window. - defaultMaxWindow * 2 - ) - ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testTooLargeRescoreOkByDynamicSetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - createIndex("idx"); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings("idx") - .setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) - .get() - ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testTooLargeRescoreOkByDynamicResultWindowSetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - createIndex("idx"); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings("idx") - .setSettings( - // Note that this is the RESULT window - Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), defaultMaxWindow * 2) - ) - .get() - ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testQueryNumericFieldWithRegex() throws Exception { - assertAcked(prepareCreate("idx").setMapping("num", "type=integer")); - ensureGreen("idx"); - - try { - client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", "34")).get(); - fail("SearchPhaseExecutionException should have been thrown"); - } catch (SearchPhaseExecutionException ex) { - assertThat(ex.getRootCause().getMessage(), containsString("Can only use regexp queries on keyword and text fields")); - } - } - - public void testTermQueryBigInt() throws Exception { - prepareCreate("idx").setMapping("field", "type=keyword").get(); - ensureGreen("idx"); - - client().prepareIndex("idx") - .setId("1") - .setSource("{\"field\" : 80315953321748200608 }", MediaTypeRegistry.JSON) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - .get(); - - String queryJson = "{ \"field\" : { \"value\" : 80315953321748200608 } }"; - XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson); - parser.nextToken(); - TermQueryBuilder query = TermQueryBuilder.fromXContent(parser); - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(query).get(); - assertEquals(1, searchResponse.getHits().getTotalHits().value); - } - - public void testTooLongRegexInRegexpQuery() throws Exception { - createIndex("idx"); - indexRandom(true, 
client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - int defaultMaxRegexLength = IndexSettings.MAX_REGEX_LENGTH_SETTING.get(Settings.EMPTY); - StringBuilder regexp = new StringBuilder(defaultMaxRegexLength); - while (regexp.length() <= defaultMaxRegexLength) { - regexp.append("]\\r\\\\]|\\\\.)*\\](?:(?:\\r\\n)?[\\t])*))*(?:,@(?:(?:\\r\\n)?[ \\t])*(?:[^()<>@,;:\\\\\".\\"); - } - SearchPhaseExecutionException e = expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", regexp.toString())).get() - ); - assertThat( - e.getRootCause().getMessage(), - containsString( - "The length of regex [" - + regexp.length() - + "] used in the Regexp Query request has exceeded " - + "the allowed maximum of [" - + defaultMaxRegexLength - + "]. " - + "This maximum can be set by changing the [" - + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey() - + "] index level setting." - ) - ); - } - - private void assertWindowFails(SearchRequestBuilder search) { - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); - assertThat( - e.toString(), - containsString( - "Result window is too large, from + size must be less than or equal to: [" - + IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - ) - ); - assertThat(e.toString(), containsString("See the scroll api for a more efficient way to request large data sets")); - } - - private void assertRescoreWindowFails(int windowSize) { - SearchRequestBuilder search = client().prepareSearch("idx") - .addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(windowSize)); - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); - assertThat( - e.toString(), - containsString( - "Rescore window [" - + windowSize - + "] is too large. It must " - + "be less than [" - + IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) - ) - ); - assertThat( - e.toString(), - containsString( - "This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting." - ) - ); - } -} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java index 67e460653245e..95b36311f6b8b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java @@ -6,24 +6,283 @@ * compatible open source license. */ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + package org.opensearch.search.simple; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest.RefreshPolicy; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.rescore.QueryRescorerBuilder; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; +import java.util.concurrent.ExecutionException; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.index.query.QueryBuilders.boolQuery; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.index.query.QueryBuilders.queryStringQuery; +import static org.opensearch.index.query.QueryBuilders.rangeQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.apache.lucene.search.TotalHits.Relation.EQUAL_TO; +import static org.apache.lucene.search.TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; + +public class SimpleSearchIT extends ParameterizedOpenSearchIntegTestCase { + + public SimpleSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + + public void testSearchNullIndex() { + expectThrows( + NullPointerException.class, + () -> client().prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() + ); + + expectThrows( + NullPointerException.class, + () -> client().prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", 
"XXX1")).get() + ); + + } + + public void testSearchRandomPreference() throws InterruptedException, ExecutionException { + createIndex("test"); + indexRandom( + true, + client().prepareIndex("test").setId("1").setSource("field", "value"), + client().prepareIndex("test").setId("2").setSource("field", "value"), + client().prepareIndex("test").setId("3").setSource("field", "value"), + client().prepareIndex("test").setId("4").setSource("field", "value"), + client().prepareIndex("test").setId("5").setSource("field", "value"), + client().prepareIndex("test").setId("6").setSource("field", "value") + ); + + int iters = scaledRandomIntBetween(10, 20); + for (int i = 0; i < iters; i++) { + String randomPreference = randomUnicodeOfLengthBetween(0, 4); + // randomPreference should not start with '_' (reserved for known preference types (e.g. _shards, _primary) + while (randomPreference.startsWith("_")) { + randomPreference = randomUnicodeOfLengthBetween(0, 4); + } + // id is not indexed, but lets see that we automatically convert to + SearchResponse searchResponse = client().prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .setPreference(randomPreference) + .get(); + assertHitCount(searchResponse, 6L); + + } + } + + public void testSimpleIp() throws Exception { + createIndex("test"); + + client().admin() + .indices() + .preparePutMapping("test") + .setSource( + XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("from") + .field("type", "ip") + .endObject() + .startObject("to") + .field("type", "ip") + .endObject() + .endObject() + .endObject() + .endObject() + ) + .get(); + + client().prepareIndex("test").setId("1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefreshPolicy(IMMEDIATE).get(); + + SearchResponse search = client().prepareSearch() + .setQuery(boolQuery().must(rangeQuery("from").lte("192.168.0.7")).must(rangeQuery("to").gte("192.168.0.7"))) + .get(); + + assertHitCount(search, 1L); + } + + public void testIpCidr() throws Exception { + createIndex("test"); + + client().admin() + .indices() + .preparePutMapping("test") + .setSource( + XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("ip") + .field("type", "ip") + .endObject() + .endObject() + .endObject() + .endObject() + ) + .get(); + ensureGreen(); + + client().prepareIndex("test").setId("1").setSource("ip", "192.168.0.1").get(); + client().prepareIndex("test").setId("2").setSource("ip", "192.168.0.2").get(); + client().prepareIndex("test").setId("3").setSource("ip", "192.168.0.3").get(); + client().prepareIndex("test").setId("4").setSource("ip", "192.168.1.4").get(); + client().prepareIndex("test").setId("5").setSource("ip", "2001:db8::ff00:42:8329").get(); + refresh(); + + SearchResponse search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(queryStringQuery("ip: 192.168.0.1")).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1/32"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.0/24"))).get(); + assertHitCount(search, 3L); -public class SimpleSearchIT extends OpenSearchIntegTestCase { + search = 
+        search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.0.0.0/8"))).get();
+        assertHitCount(search, 4L);
+
+        search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0.0.0.0/0"))).get();
+        assertHitCount(search, 4L);
+
+        search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::ff00:42:8329/128"))).get();
+        assertHitCount(search, 1L);
+
+        search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::/64"))).get();
+        assertHitCount(search, 1L);
+
+        search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "::/0"))).get();
+        assertHitCount(search, 5L);
+
+        search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.1.5/32"))).get();
+        assertHitCount(search, 0L);
+
+        assertFailures(
+            client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0/0/0/0/0"))),
+            RestStatus.BAD_REQUEST,
+            containsString("Expected [ip/prefix] but was [0/0/0/0/0]")
+        );
+    }
+
+    public void testSimpleId() {
+        createIndex("test");
+
+        client().prepareIndex("test").setId("XXX1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
+        // _id is not indexed as a regular field, but term and query_string queries on it should still find the doc
+        SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")).get();
+        assertHitCount(searchResponse, 1L);
+
+        searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")).get();
+        assertHitCount(searchResponse, 1L);
+    }
+
+    public void testSimpleDateRange() throws Exception {
+        createIndex("test");
+        client().prepareIndex("test").setId("1").setSource("field", "2010-01-05T02:00").get();
+        client().prepareIndex("test").setId("2").setSource("field", "2010-01-06T02:00").get();
+        ensureGreen();
+        refresh();
+        SearchResponse searchResponse = client().prepareSearch("test")
+            .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d/d"))
+            .get();
+        assertNoFailures(searchResponse);
+        assertHitCount(searchResponse, 2L);
+
+        searchResponse = client().prepareSearch("test")
+            .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lte("2010-01-06T02:00"))
+            .get();
+        assertNoFailures(searchResponse);
+        assertHitCount(searchResponse, 2L);
+
+        searchResponse = client().prepareSearch("test")
+            .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lt("2010-01-06T02:00"))
+            .get();
+        assertNoFailures(searchResponse);
+        assertHitCount(searchResponse, 1L);
+
+        searchResponse = client().prepareSearch("test")
+            .setQuery(QueryBuilders.rangeQuery("field").gt("2010-01-05T02:00").lt("2010-01-06T02:00"))
+            .get();
+        assertNoFailures(searchResponse);
+        assertHitCount(searchResponse, 0L);
+
+        searchResponse = client().prepareSearch("test")
+            .setQuery(QueryBuilders.queryStringQuery("field:[2010-01-03||+2d TO 2010-01-04||+2d/d]"))
+            .get();
+        assertHitCount(searchResponse, 2L);
+    }
 
-    // TODO: Move this test to ParameterizedSimpleSearchIT after https://github.com/opensearch-project/OpenSearch/issues/8371
     public void testSimpleTerminateAfterCount() throws Exception {
         prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get();
         ensureGreen();
@@ -40,13 +299,19 @@ public void testSimpleTerminateAfterCount() throws Exception {
         refresh();
 
         SearchResponse searchResponse;
+        int size;
         for (int i = 1; i < max; i++) {
+            size = randomIntBetween(0, max);
             searchResponse = client().prepareSearch("test")
                 .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max))
                 .setTerminateAfter(i)
+                .setSize(size)
+                .setTrackTotalHits(true)
                 .get();
             assertHitCount(searchResponse, i);
             assertTrue(searchResponse.isTerminatedEarly());
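+            // terminate_after stops collection at i hits, so the returned page is capped at min(i, size)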
+            assertEquals(Math.min(i, size), searchResponse.getHits().getHits().length);
         }
 
         searchResponse = client().prepareSearch("test")
@@ -57,4 +321,385 @@ public void testSimpleTerminateAfterCount() throws Exception {
         assertHitCount(searchResponse, max);
         assertFalse(searchResponse.isTerminatedEarly());
     }
+
+    public void testSimpleTerminateAfterTrackTotalHitsUpTo() throws Exception {
+        prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get();
+        ensureGreen();
+        int numDocs = 29;
+        List<IndexRequestBuilder> docbuilders = new ArrayList<>(numDocs);
+
+        for (int i = 1; i <= numDocs; i++) {
+            String id = String.valueOf(i);
+            docbuilders.add(client().prepareIndex("test").setId(id).setSource("field", i));
+        }
+
+        indexRandom(true, docbuilders);
+        ensureGreen();
+        refresh();
+
+        // size=0 is a special case where topDocsCollector is not added
+        int size = randomIntBetween(0, 1);
+        SearchResponse searchResponse;
+        searchResponse = client().prepareSearch("test")
+            .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs))
+            .setTerminateAfter(10)
+            .setSize(size)
+            .setTrackTotalHitsUpTo(5)
+            .get();
+        assertTrue(searchResponse.isTerminatedEarly());
+        assertEquals(5, searchResponse.getHits().getTotalHits().value);
+        assertEquals(GREATER_THAN_OR_EQUAL_TO, searchResponse.getHits().getTotalHits().relation);
+
+        searchResponse = client().prepareSearch("test")
+            .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs))
+            .setTerminateAfter(5)
+            .setSize(size)
+            .setTrackTotalHitsUpTo(10)
+            .get();
+        assertTrue(searchResponse.isTerminatedEarly());
+        assertEquals(5, searchResponse.getHits().getTotalHits().value);
+        assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation);
+
+        searchResponse = client().prepareSearch("test")
+            .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs))
+            .setTerminateAfter(5)
+            .setSize(size)
+            .setTrackTotalHitsUpTo(5)
+            .get();
+        assertTrue(searchResponse.isTerminatedEarly());
+        assertEquals(5, searchResponse.getHits().getTotalHits().value);
+        assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation);
+
+        searchResponse = client().prepareSearch("test")
+            .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs))
+            .setTerminateAfter(5)
+            .setSize(size)
+            .setTrackTotalHits(true)
+            .get();
+        assertTrue(searchResponse.isTerminatedEarly());
+        assertEquals(5, searchResponse.getHits().getTotalHits().value);
+        assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation);
+
+        searchResponse = client().prepareSearch("test")
+            .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs))
+            .setTerminateAfter(numDocs * 2)
+            .setSize(size)
+            .setTrackTotalHits(true)
+            .get();
+        assertFalse(searchResponse.isTerminatedEarly());
+        assertEquals(numDocs, searchResponse.getHits().getTotalHits().value);
+        assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation);
+
+        searchResponse = client().prepareSearch("test")
+            .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs))
+            .setSize(size)
+            .setTrackTotalHitsUpTo(5)
+            .get();
+        assertEquals(5, searchResponse.getHits().getTotalHits().value);
+        assertEquals(GREATER_THAN_OR_EQUAL_TO, searchResponse.getHits().getTotalHits().relation);
+    }
+
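+    // With an index sort on `rank` and total-hit tracking disabled, the query below can terminate
+    // each segment early once `size` docs are collected, yet hits must still arrive in rank order.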
+    public void testSimpleIndexSortEarlyTerminate() throws Exception {
+        prepareCreate("test").setSettings(
+            Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).put("index.sort.field", "rank")
+        ).setMapping("rank", "type=integer").get();
+        ensureGreen();
+        int max = randomIntBetween(3, 29);
+        List<IndexRequestBuilder> docbuilders = new ArrayList<>(max);
+
+        for (int i = max - 1; i >= 0; i--) {
+            String id = String.valueOf(i);
+            docbuilders.add(client().prepareIndex("test").setId(id).setSource("rank", i));
+        }
+
+        indexRandom(true, docbuilders);
+        ensureGreen();
+        refresh();
+
+        SearchResponse searchResponse;
+        for (int i = 1; i < max; i++) {
+            searchResponse = client().prepareSearch("test")
+                .addDocValueField("rank")
+                .setTrackTotalHits(false)
+                .addSort("rank", SortOrder.ASC)
+                .setSize(i)
+                .get();
+            assertNull(searchResponse.getHits().getTotalHits());
+            for (int j = 0; j < i; j++) {
+                assertThat(searchResponse.getHits().getAt(j).field("rank").getValue(), equalTo((long) j));
+            }
+        }
+    }
+
+    public void testInsaneFromAndSize() throws Exception {
+        createIndex("idx");
+        indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON));
+
+        assertWindowFails(client().prepareSearch("idx").setFrom(Integer.MAX_VALUE));
+        assertWindowFails(client().prepareSearch("idx").setSize(Integer.MAX_VALUE));
+    }
+
+    public void testTooLargeFromAndSize() throws Exception {
+        createIndex("idx");
+        indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON));
+
+        assertWindowFails(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)));
+        assertWindowFails(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1));
+        assertWindowFails(
+            client().prepareSearch("idx")
+                .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))
+                .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))
+        );
+    }
+
+    public void testLargeFromAndSizeSucceeds() throws Exception {
+        createIndex("idx");
+        indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON));
+
+        assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - 10).get(), 1);
+        assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1);
+        assertHitCount(
+            client().prepareSearch("idx")
+                .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2)
+                .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2 - 1)
+                .get(),
+            1
+        );
+    }
+
+    public void testTooLargeFromAndSizeOkBySetting() throws Exception {
+        prepareCreate("idx").setSettings(
+            Settings.builder()
+                .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2)
+        ).get();
+        indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON));
+
+        assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1);
+        assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1);
+        assertHitCount(
+            client().prepareSearch("idx")
+                .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))
+                .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))
+                .get(),
+            1
+        );
+    }
+
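+    // index.max_result_window is dynamic, so the same relaxation can also be applied to a live
+    // index via an update-settings call, which the next test verifies.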
+    public void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception {
+        createIndex("idx");
+        assertAcked(
+            client().admin()
+                .indices()
+                .prepareUpdateSettings("idx")
+                .setSettings(
+                    Settings.builder()
+                        .put(
+                            IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(),
+                            IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2
+                        )
+                )
+                .get()
+        );
+        indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON));
+
+        assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1);
+        assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1);
+        assertHitCount(
+            client().prepareSearch("idx")
+                .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))
+                .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))
+                .get(),
+            1
+        );
+    }
+
+    public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws Exception {
+        prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)).get();
+        indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON));
+
+        assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1);
+        assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1);
+        assertHitCount(
+            client().prepareSearch("idx")
+                .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10)
+                .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10)
+                .get(),
+            1
+        );
+    }
+
+    public void testTooLargeRescoreWindow() throws Exception {
+        createIndex("idx");
+        indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON));
+
+        assertRescoreWindowFails(Integer.MAX_VALUE);
+        assertRescoreWindowFails(IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + 1);
+    }
+
+    public void testTooLargeRescoreOkBySetting() throws Exception {
+        int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY);
+        prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2))
+            .get();
+        indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON));
+
+        assertHitCount(
+            client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(),
+            1
+        );
+    }
+
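+    // A rescore window is accepted when either max_rescore_window or max_result_window covers it;
+    // the surrounding tests raise each of the two settings in turn, both statically and dynamically.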
+    public void testTooLargeRescoreOkByResultWindowSetting() throws Exception {
+        int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY);
+        prepareCreate("idx").setSettings(
+            Settings.builder()
+                .put(
+                    IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), // Note that this is the RESULT window.
+                    defaultMaxWindow * 2
+                )
+        ).get();
+        indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON));
+
+        assertHitCount(
+            client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(),
+            1
+        );
+    }
+
+    public void testTooLargeRescoreOkByDynamicSetting() throws Exception {
+        int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY);
+        createIndex("idx");
+        assertAcked(
+            client().admin()
+                .indices()
+                .prepareUpdateSettings("idx")
+                .setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2))
+                .get()
+        );
+        indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON));
+
+        assertHitCount(
+            client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(),
+            1
+        );
+    }
+
+    public void testTooLargeRescoreOkByDynamicResultWindowSetting() throws Exception {
+        int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY);
+        createIndex("idx");
+        assertAcked(
+            client().admin()
+                .indices()
+                .prepareUpdateSettings("idx")
+                .setSettings(
+                    // Note that this is the RESULT window
+                    Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)
+                )
+                .get()
+        );
+        indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON));
+
+        assertHitCount(
+            client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(),
+            1
+        );
+    }
+
+    public void testQueryNumericFieldWithRegex() throws Exception {
+        assertAcked(prepareCreate("idx").setMapping("num", "type=integer"));
+        ensureGreen("idx");
+
+        try {
+            client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", "34")).get();
+            fail("SearchPhaseExecutionException should have been thrown");
+        } catch (SearchPhaseExecutionException ex) {
+            assertThat(ex.getRootCause().getMessage(), containsString("Can only use regexp queries on keyword and text fields"));
+        }
+    }
+
+    public void testTermQueryBigInt() throws Exception {
+        prepareCreate("idx").setMapping("field", "type=keyword").get();
+        ensureGreen("idx");
+
+        client().prepareIndex("idx")
+            .setId("1")
+            .setSource("{\"field\" : 80315953321748200608 }", MediaTypeRegistry.JSON)
+            .setRefreshPolicy(RefreshPolicy.IMMEDIATE)
+            .get();
+
+        String queryJson = "{ \"field\" : { \"value\" : 80315953321748200608 } }";
+        XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson);
+        parser.nextToken();
+        TermQueryBuilder query = TermQueryBuilder.fromXContent(parser);
+        SearchResponse searchResponse = client().prepareSearch("idx").setQuery(query).get();
+        assertEquals(1, searchResponse.getHits().getTotalHits().value);
+    }
+
+    public void testTooLongRegexInRegexpQuery() throws Exception {
+        createIndex("idx");
+        indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON));
+
+        int defaultMaxRegexLength = IndexSettings.MAX_REGEX_LENGTH_SETTING.get(Settings.EMPTY);
+        StringBuilder regexp = new StringBuilder(defaultMaxRegexLength);
+        while (regexp.length() <= defaultMaxRegexLength) {
+            regexp.append("]\\r\\\\]|\\\\.)*\\](?:(?:\\r\\n)?[\\t])*))*(?:,@(?:(?:\\r\\n)?[ \\t])*(?:[^()<>@,;:\\\\\".\\");
+        }
+        SearchPhaseExecutionException e = expectThrows(
+            SearchPhaseExecutionException.class,
+            () -> client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", regexp.toString())).get()
+        );
+        assertThat(
+            e.getRootCause().getMessage(),
+            containsString(
+                "The length of regex ["
+                    + regexp.length()
+                    + "] used in the Regexp Query request has exceeded "
+                    + "the allowed maximum of ["
+                    + defaultMaxRegexLength
+                    + "]. "
+                    + "This maximum can be set by changing the ["
+                    + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey()
+                    + "] index level setting."
+            )
+        );
+    }
+
+    private void assertWindowFails(SearchRequestBuilder search) {
+        SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get());
+        assertThat(
+            e.toString(),
+            containsString(
+                "Result window is too large, from + size must be less than or equal to: ["
+                    + IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)
+            )
+        );
+        assertThat(e.toString(), containsString("See the scroll api for a more efficient way to request large data sets"));
+    }
+
+    private void assertRescoreWindowFails(int windowSize) {
+        SearchRequestBuilder search = client().prepareSearch("idx")
+            .addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(windowSize));
+        SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get());
+        assertThat(
+            e.toString(),
+            containsString(
+                "Rescore window ["
+                    + windowSize
+                    + "] is too large. It must "
+                    + "be less than ["
+                    + IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY)
+            )
+        );
+        assertThat(
+            e.toString(),
+            containsString(
+                "This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting."
+            )
+        );
+    }
 }
diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java
index 28931bb5a860f..960b46d68977b 100644
--- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java
+++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java
@@ -896,6 +896,9 @@ public void evaluateRequestShouldUseConcurrentSearch() {
             && aggregations().factories() != null
             && !aggregations().factories().allFactoriesSupportConcurrentSearch()) {
             requestShouldUseConcurrentSearch.set(false);
+        } else if (terminateAfter != DEFAULT_TERMINATE_AFTER) {
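+            // requests that set terminate_after always take the sequential (non-concurrent) path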
+            requestShouldUseConcurrentSearch.set(false);
         } else {
             requestShouldUseConcurrentSearch.set(true);
         }