From 61495bf0a915216f623d51807aeb3353caffd296 Mon Sep 17 00:00:00 2001
From: Kunal Kotwani
Date: Fri, 29 Sep 2023 13:40:02 -0700
Subject: [PATCH 01/26] Refactor async blob read to avoid blocking calls, support non multipart calls (#10192)

Signed-off-by: Kunal Kotwani
---
 .../repositories/s3/S3BlobContainer.java | 89 +++++++-----
 .../s3/S3BlobStoreContainerTests.java | 129 +++++++++++++++---
 2 files changed, 163 insertions(+), 55 deletions(-)

diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
index 2911a018df337..c6ae58371e15c 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
@@ -228,35 +228,50 @@ public void readBlobAsync(String blobName, ActionListener<ReadContext> listener) try (AmazonAsyncS3Reference amazonS3Reference = SocketAccess.doPrivileged(blobStore::asyncClientReference)) { final S3AsyncClient s3AsyncClient = amazonS3Reference.get().client(); final String bucketName = blobStore.bucket(); + final String blobKey = buildKey(blobName); - final GetObjectAttributesResponse blobMetadata = getBlobMetadata(s3AsyncClient, bucketName, blobName).get(); + final CompletableFuture<GetObjectAttributesResponse> blobMetadataFuture = getBlobMetadata(s3AsyncClient, bucketName, blobKey); - final long blobSize = blobMetadata.objectSize(); - final int numberOfParts = blobMetadata.objectParts().totalPartsCount(); - final String blobChecksum = blobMetadata.checksum().checksumCRC32(); - - final List<InputStreamContainer> blobPartStreams = new ArrayList<>(); - final List<CompletableFuture<InputStreamContainer>> blobPartInputStreamFutures = new ArrayList<>(); - // S3 multipart files use 1 to n indexing - for (int partNumber = 1; partNumber <= numberOfParts; partNumber++) { - blobPartInputStreamFutures.add(getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, partNumber)); - } - - CompletableFuture.allOf(blobPartInputStreamFutures.toArray(CompletableFuture[]::new)).whenComplete((unused, throwable) -> { - if (throwable == null) { - listener.onResponse( - new ReadContext( - blobSize, - blobPartInputStreamFutures.stream().map(CompletableFuture::join).collect(Collectors.toList()), - blobChecksum - ) - ); - } else { + blobMetadataFuture.whenComplete((blobMetadata, throwable) -> { + if (throwable != null) { Exception ex = throwable.getCause() instanceof Exception ? (Exception) throwable.getCause() : new Exception(throwable.getCause()); listener.onFailure(ex); + return; + } + + final List<CompletableFuture<InputStreamContainer>> blobPartInputStreamFutures = new ArrayList<>(); + final long blobSize = blobMetadata.objectSize(); + final Integer numberOfParts = blobMetadata.objectParts() == null ?
null : blobMetadata.objectParts().totalPartsCount(); + final String blobChecksum = blobMetadata.checksum().checksumCRC32(); + + if (numberOfParts == null) { + blobPartInputStreamFutures.add(getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobKey, null)); + } else { + // S3 multipart files use 1 to n indexing + for (int partNumber = 1; partNumber <= numberOfParts; partNumber++) { + blobPartInputStreamFutures.add(getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobKey, partNumber)); + } } + + CompletableFuture.allOf(blobPartInputStreamFutures.toArray(CompletableFuture[]::new)) + .whenComplete((unused, partThrowable) -> { + if (partThrowable == null) { + listener.onResponse( + new ReadContext( + blobSize, + blobPartInputStreamFutures.stream().map(CompletableFuture::join).collect(Collectors.toList()), + blobChecksum + ) + ); + } else { + Exception ex = partThrowable.getCause() instanceof Exception + ? (Exception) partThrowable.getCause() + : new Exception(partThrowable.getCause()); + listener.onFailure(ex); + } + }); }); } catch (Exception ex) { listener.onFailure(SdkException.create("Error occurred while fetching blob parts from the repository", ex)); @@ -685,41 +700,47 @@ static Tuple numberOfMultiparts(final long totalSize, final long par * the stream and its related metadata. * @param s3AsyncClient Async client to be utilized to fetch the object part * @param bucketName Name of the S3 bucket - * @param blobName Identifier of the blob for which the parts will be fetched - * @param partNumber Part number for the blob to be retrieved + * @param blobKey Identifier of the blob for which the parts will be fetched + * @param partNumber Optional part number for the blob to be retrieved * @return A future of {@link InputStreamContainer} containing the stream and stream metadata. */ CompletableFuture getBlobPartInputStreamContainer( S3AsyncClient s3AsyncClient, String bucketName, - String blobName, - int partNumber + String blobKey, + @Nullable Integer partNumber ) { - final GetObjectRequest.Builder getObjectRequestBuilder = GetObjectRequest.builder() - .bucket(bucketName) - .key(blobName) - .partNumber(partNumber); + final boolean isMultipartObject = partNumber != null; + final GetObjectRequest.Builder getObjectRequestBuilder = GetObjectRequest.builder().bucket(bucketName).key(blobKey); + + if (isMultipartObject) { + getObjectRequestBuilder.partNumber(partNumber); + } return SocketAccess.doPrivileged( () -> s3AsyncClient.getObject(getObjectRequestBuilder.build(), AsyncResponseTransformer.toBlockingInputStream()) - .thenApply(S3BlobContainer::transformResponseToInputStreamContainer) + .thenApply(response -> transformResponseToInputStreamContainer(response, isMultipartObject)) ); } /** * Transforms the stream response object from S3 into an {@link InputStreamContainer} * @param streamResponse Response stream object from S3 + * @param isMultipartObject Flag to denote a multipart object response * @return {@link InputStreamContainer} containing the stream and stream metadata */ // Package-Private for testing. 
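[Editor's note, not part of the patch: the readBlobAsync hunks above swap a blocking `getBlobMetadata(...).get()` for `whenComplete` composition, and treat a null `totalPartsCount()` as "plain object, issue one GET with no part number". Below is a minimal, runnable sketch of that fan-out pattern; `fetchPartCount` and `fetchPart` are hypothetical stand-ins for `getBlobMetadata` and `getBlobPartInputStreamContainer`. The transformer method continues in the diff after this note.]

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public class NonBlockingFanOut {
    // Stand-in for getBlobMetadata(...): resolves to a part count off-thread.
    static CompletableFuture<Integer> fetchPartCount() {
        return CompletableFuture.supplyAsync(() -> 3);
    }

    // Stand-in for getBlobPartInputStreamContainer(...): fetches one part.
    static CompletableFuture<String> fetchPart(int partNumber) {
        return CompletableFuture.supplyAsync(() -> "part-" + partNumber);
    }

    public static void main(String[] args) {
        CompletableFuture<Void> done = fetchPartCount().thenCompose(parts -> {
            List<CompletableFuture<String>> futures = new ArrayList<>();
            for (int p = 1; p <= parts; p++) { // S3 part numbers are 1-indexed
                futures.add(fetchPart(p));
            }
            // allOf completes once every part future has resolved, so the
            // join() calls inside thenRun can no longer block.
            return CompletableFuture.allOf(futures.toArray(CompletableFuture[]::new))
                .thenRun(() -> futures.forEach(f -> System.out.println(f.join())));
        });
        done.join(); // only the demo blocks here; the patched code reports via the ActionListener instead
    }
}
```

[The patch additionally routes any exceptional completion to `listener.onFailure` inside `whenComplete`, which matters once no caller is left blocking on `get()` to observe the exception.]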
- static InputStreamContainer transformResponseToInputStreamContainer(ResponseInputStream streamResponse) { + static InputStreamContainer transformResponseToInputStreamContainer( + ResponseInputStream streamResponse, + boolean isMultipartObject + ) { final GetObjectResponse getObjectResponse = streamResponse.response(); final String contentRange = getObjectResponse.contentRange(); final Long contentLength = getObjectResponse.contentLength(); - if (contentRange == null || contentLength == null) { + if ((isMultipartObject && contentRange == null) || contentLength == null) { throw SdkException.builder().message("Failed to fetch required metadata for blob part").build(); } - final Long offset = HttpRangeUtils.getStartOffsetFromRangeHeader(getObjectResponse.contentRange()); + final long offset = isMultipartObject ? HttpRangeUtils.getStartOffsetFromRangeHeader(getObjectResponse.contentRange()) : 0L; return new InputStreamContainer(streamResponse, getObjectResponse.contentLength(), offset); } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index a87c060dcc60a..9817d7cd520ef 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -81,7 +81,6 @@ import org.opensearch.common.io.InputStreamContainer; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.unit.ByteSizeUnit; -import org.opensearch.repositories.s3.async.AsyncTransferManager; import org.opensearch.test.OpenSearchTestCase; import java.io.ByteArrayInputStream; @@ -100,7 +99,6 @@ import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -919,7 +917,7 @@ public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanNumberO testListBlobsByPrefixInLexicographicOrder(12, 2, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); } - public void testReadBlobAsync() throws Exception { + public void testReadBlobAsyncMultiPart() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); final String blobName = randomAlphaOfLengthBetween(1, 10); final String checksum = randomAlphaOfLength(10); @@ -932,11 +930,7 @@ public void testReadBlobAsync() throws Exception { final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) ); - final AsyncTransferManager asyncTransferManager = new AsyncTransferManager( - 10000L, - mock(ExecutorService.class), - mock(ExecutorService.class) - ); + final S3BlobStore blobStore = mock(S3BlobStore.class); final BlobPath blobPath = new BlobPath(); @@ -944,7 +938,6 @@ public void testReadBlobAsync() throws Exception { when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); when(blobStore.serverSideEncryption()).thenReturn(false); when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference); - when(blobStore.getAsyncTransferManager()).thenReturn(asyncTransferManager); CompletableFuture getObjectAttributesResponseCompletableFuture = new CompletableFuture<>(); 
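[Editor's note, not part of the patch: the `transformResponseToInputStreamContainer` change above encodes a simple offset rule. For a multipart GET, S3 returns a `Content-Range` header of the form "bytes <start>-<end>/<total>" and the start value becomes the part's offset; a single-part GET always starts at offset 0, and only then is a missing header tolerated. A hedged sketch of that rule follows (the plugin's real parsing lives in `HttpRangeUtils`, not shown here); the test diff continues after this note.]

```java
// Sketch only; mirrors the rule in the patch rather than HttpRangeUtils itself.
static long startOffset(String contentRange, boolean isMultipartObject) {
    if (!isMultipartObject) {
        return 0L; // single-part objects are always read from the beginning
    }
    // e.g. "bytes 10-20/100" -> 10; assumes a well-formed header, since the
    // patch already fails with an SdkException when the header is absent
    String range = contentRange.substring("bytes ".length());
    return Long.parseLong(range.substring(0, range.indexOf('-')));
}
```

[This is also why the test fixture below changes from "bytes 0-10/100" to "bytes 10-20/100": with a non-zero start, the offset assertion can actually distinguish the multipart path from the single-part path.]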
getObjectAttributesResponseCompletableFuture.complete( @@ -984,6 +977,60 @@ public void testReadBlobAsync() throws Exception { } } + public void testReadBlobAsyncSinglePart() throws Exception { + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final String blobName = randomAlphaOfLengthBetween(1, 10); + final String checksum = randomAlphaOfLength(10); + + final int objectSize = 100; + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) + ); + final S3BlobStore blobStore = mock(S3BlobStore.class); + final BlobPath blobPath = new BlobPath(); + + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.serverSideEncryption()).thenReturn(false); + when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference); + + CompletableFuture getObjectAttributesResponseCompletableFuture = new CompletableFuture<>(); + getObjectAttributesResponseCompletableFuture.complete( + GetObjectAttributesResponse.builder() + .checksum(Checksum.builder().checksumCRC32(checksum).build()) + .objectSize((long) objectSize) + .build() + ); + when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn( + getObjectAttributesResponseCompletableFuture + ); + + mockObjectResponse(s3AsyncClient, bucketName, blobName, objectSize); + + CountDownLatch countDownLatch = new CountDownLatch(1); + CountingCompletionListener readContextActionListener = new CountingCompletionListener<>(); + LatchedActionListener listener = new LatchedActionListener<>(readContextActionListener, countDownLatch); + + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + blobContainer.readBlobAsync(blobName, listener); + countDownLatch.await(); + + assertEquals(1, readContextActionListener.getResponseCount()); + assertEquals(0, readContextActionListener.getFailureCount()); + ReadContext readContext = readContextActionListener.getResponse(); + assertEquals(1, readContext.getNumberOfParts()); + assertEquals(checksum, readContext.getBlobChecksum()); + assertEquals(objectSize, readContext.getBlobSize()); + + InputStreamContainer inputStreamContainer = readContext.getPartStreams().stream().findFirst().get(); + assertEquals(objectSize, inputStreamContainer.getContentLength()); + assertEquals(0, inputStreamContainer.getOffset()); + assertEquals(objectSize, inputStreamContainer.getInputStream().readAllBytes().length); + + } + public void testReadBlobAsyncFailure() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); final String blobName = randomAlphaOfLengthBetween(1, 10); @@ -996,11 +1043,7 @@ public void testReadBlobAsyncFailure() throws Exception { final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) ); - final AsyncTransferManager asyncTransferManager = new AsyncTransferManager( - 10000L, - mock(ExecutorService.class), - mock(ExecutorService.class) - ); + final S3BlobStore blobStore = mock(S3BlobStore.class); final BlobPath blobPath = new BlobPath(); @@ -1008,7 +1051,6 @@ public void testReadBlobAsyncFailure() throws Exception { when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); when(blobStore.serverSideEncryption()).thenReturn(false); 
when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference); - when(blobStore.getAsyncTransferManager()).thenReturn(asyncTransferManager); CompletableFuture getObjectAttributesResponseCompletableFuture = new CompletableFuture<>(); getObjectAttributesResponseCompletableFuture.complete( @@ -1071,7 +1113,7 @@ public void testGetBlobPartInputStream() throws Exception { final String blobName = randomAlphaOfLengthBetween(1, 10); final String bucketName = randomAlphaOfLengthBetween(1, 10); final long contentLength = 10L; - final String contentRange = "bytes 0-10/100"; + final String contentRange = "bytes 10-20/100"; final InputStream inputStream = ResponseInputStream.nullInputStream(); final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); @@ -1095,9 +1137,17 @@ public void testGetBlobPartInputStream() throws Exception { ) ).thenReturn(getObjectPartResponse); + // Header based offset in case of a multi part object request InputStreamContainer inputStreamContainer = blobContainer.getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, 0) .get(); + assertEquals(10, inputStreamContainer.getOffset()); + assertEquals(contentLength, inputStreamContainer.getContentLength()); + assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available()); + + // 0 offset in case of a single part object request + inputStreamContainer = blobContainer.getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, null).get(); + assertEquals(0, inputStreamContainer.getOffset()); assertEquals(contentLength, inputStreamContainer.getContentLength()); assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available()); @@ -1108,28 +1158,65 @@ public void testTransformResponseToInputStreamContainer() throws Exception { final long contentLength = 10L; final InputStream inputStream = ResponseInputStream.nullInputStream(); - final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); - GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength(contentLength).build(); + // Exception when content range absent for multipart object ResponseInputStream responseInputStreamNoRange = new ResponseInputStream<>(getObjectResponse, inputStream); - assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoRange)); + assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoRange, true)); + + // No exception when content range absent for single part object + ResponseInputStream responseInputStreamNoRangeSinglePart = new ResponseInputStream<>( + getObjectResponse, + inputStream + ); + InputStreamContainer inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer( + responseInputStreamNoRangeSinglePart, + false + ); + assertEquals(contentLength, inputStreamContainer.getContentLength()); + assertEquals(0, inputStreamContainer.getOffset()); + // Exception when length is absent getObjectResponse = GetObjectResponse.builder().contentRange(contentRange).build(); ResponseInputStream responseInputStreamNoContentLength = new ResponseInputStream<>( getObjectResponse, inputStream ); - assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoContentLength)); + assertThrows( + SdkException.class, + () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoContentLength, true) + ); + // No exception when range and length both are 
present getObjectResponse = GetObjectResponse.builder().contentRange(contentRange).contentLength(contentLength).build(); ResponseInputStream<GetObjectResponse> responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream); - InputStreamContainer inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer(responseInputStream); + inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer(responseInputStream, true); assertEquals(contentLength, inputStreamContainer.getContentLength()); assertEquals(0, inputStreamContainer.getOffset()); assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available()); } + private void mockObjectResponse(S3AsyncClient s3AsyncClient, String bucketName, String blobName, int objectSize) { + + final InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(objectSize)); + + GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength((long) objectSize).build(); + + CompletableFuture<ResponseInputStream<GetObjectResponse>> getObjectPartResponse = new CompletableFuture<>(); + ResponseInputStream<GetObjectResponse> responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream); + getObjectPartResponse.complete(responseInputStream); + + GetObjectRequest getObjectRequest = GetObjectRequest.builder().bucket(bucketName).key(blobName).build(); + + when( + s3AsyncClient.getObject( + eq(getObjectRequest), + ArgumentMatchers.<AsyncResponseTransformer<GetObjectResponse, ResponseInputStream<GetObjectResponse>>>any() + ) + ).thenReturn(getObjectPartResponse); + + } + private void mockObjectPartResponse( S3AsyncClient s3AsyncClient, String bucketName,

From be247669c4d5a4494958a508c2fcde4e36c48499 Mon Sep 17 00:00:00 2001
From: Sagar <99425694+sgup432@users.noreply.github.com>
Date: Fri, 29 Sep 2023 22:21:37 -0700
Subject: [PATCH 02/26] [Search latency - Coordinator] Changing version check to 2.11 (#10280)

* [Search latency - Coordinator] Changing version check to 2.11

Signed-off-by: Sagar Upadhyaya

* [Port main] update version check as per v2.11.0

[Backport 2.x] Indexing: add Doc status counter (#10267)

* Indexing: add Doc Status Counter (#8716)

Currently, OpenSearch returns a 200 OK response code for a Bulk API call, even though there can be partial/complete failures within the request end-to-end. This provides doc-level stats with respect to the REST status code as 2xx, 4xx, 5xx, etc.
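[Editor's note, not part of the patch: the "200 OK with partial failures" behaviour is visible from the client side. A hedged sketch of reading per-item REST statuses on a bulk response, i.e. the statuses the new DocStatusStats buckets into 2xx/4xx/5xx counters; it assumes a BulkResponse already obtained from a client call.]

```java
import org.opensearch.action.bulk.BulkItemResponse;
import org.opensearch.action.bulk.BulkResponse;

static void logFailedItems(BulkResponse response) {
    // The HTTP response was 200, but each item carries its own REST status.
    for (BulkItemResponse item : response.getItems()) {
        if (item.isFailed()) {
            // e.g. 409 for a version conflict, 5xx for server-side failures
            System.err.println(item.getId() + " -> " + item.status() + ": " + item.getFailureMessage());
        }
    }
}
```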
Signed-off-by: Rohit Ashiwal (cherry picked from commit d656e3db592f29466d35452867caa241f5429485) Signed-off-by: github-actions[bot] (cherry picked from commit 94173e3f8b343ba29db24e5682d1706bbda9f9a4) Signed-off-by: Rohit Ashiwal Signed-off-by: Sagar Upadhyaya --------- Signed-off-by: Sagar Upadhyaya Signed-off-by: Rohit Ashiwal Signed-off-by: github-actions[bot] --- .../rest-api-spec/test/nodes.stats/11_indices_metrics.yml | 4 ++-- .../java/org/opensearch/index/search/stats/SearchStats.java | 4 ++-- .../main/java/org/opensearch/index/shard/IndexingStats.java | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index 3f79227ce64e8..784c7b52b18b4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -141,8 +141,8 @@ --- "Metric - indexing doc_status": - skip: - version: " - 2.99.99" - reason: "To be introduced in future release :: TODO: change if/when we backport to 2.x" + version: " - 2.10.99" + reason: "Doc Status Stats were introduced in v2.11.0" - do: nodes.info: {} - set: diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index 14aaf7e58a59c..1f9144b28f286 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -242,7 +242,7 @@ private Stats(StreamInput in) throws IOException { pitCurrent = in.readVLong(); } - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_11_0)) { this.requestStatsLongHolder = new RequestStatsLongHolder(); requestStatsLongHolder.requestStatsHolder = in.readMap(StreamInput::readString, PhaseStatsLongHolder::new); } @@ -437,7 +437,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(pitCurrent); } - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_11_0)) { if (requestStatsLongHolder == null) { requestStatsLongHolder = new RequestStatsLongHolder(); } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java index f1abea81a6511..89cbc59403faf 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java @@ -172,7 +172,7 @@ public Stats(StreamInput in) throws IOException { isThrottled = in.readBoolean(); throttleTimeInMillis = in.readLong(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_11_0)) { docStatusStats = in.readOptionalWriteable(DocStatusStats::new); } else { docStatusStats = null; @@ -308,7 +308,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isThrottled); out.writeLong(throttleTimeInMillis); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_11_0)) { out.writeOptionalWriteable(docStatusStats); } } From e1565821a28109c5c7f773e77764fc5104de7f88 Mon Sep 17 00:00:00 2001 From: Heemin Kim Date: Sat, 30 Sep 2023 09:02:49 -0700 Subject: [PATCH 03/26] Pass parent filter to inner query in nested query 
(#10246) Pass parent filter to inner query so that inner query can utilize the information Signed-off-by: Heemin Kim --- CHANGELOG.md | 1 + .../index/query/NestedQueryBuilder.java | 3 +++ .../index/query/QueryShardContext.java | 9 +++++++++ .../index/query/NestedQueryBuilderTests.java | 20 +++++++++++++++++++ 4 files changed, 33 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3d1ca935193c3..f6c29d9b64f68 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -115,6 +115,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) - Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) - [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) +- Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java index 33fe96597b4f8..ac4fde7f06b16 100644 --- a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java @@ -318,10 +318,13 @@ protected Query doToQuery(QueryShardContext context) throws IOException { parentFilter = context.bitsetFilter(objectMapper.nestedTypeFilter()); } + BitSetProducer previousParentFilter = context.getParentFilter(); try { + context.setParentFilter(parentFilter); context.nestedScope().nextLevel(nestedObjectMapper); innerQuery = this.query.toQuery(context); } finally { + context.setParentFilter(previousParentFilter); context.nestedScope().previousLevel(); } diff --git a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java index 0bf05e633bba3..701484fbc8dc3 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java @@ -115,6 +115,7 @@ public class QueryShardContext extends QueryRewriteContext { private boolean mapUnmappedFieldAsString; private NestedScope nestedScope; private final ValuesSourceRegistry valuesSourceRegistry; + private BitSetProducer parentFilter; public QueryShardContext( int shardId, @@ -622,4 +623,12 @@ public BitsetFilterCache getBitsetFilterCache() { public AggregationUsageService getUsageService() { return valuesSourceRegistry.getUsageService(); } + + public BitSetProducer getParentFilter() { + return parentFilter; + } + + public void setParentFilter(BitSetProducer parentFilter) { + this.parentFilter = parentFilter; + } } diff --git a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java index 0d66654a70f08..62337264bc0b1 100644 --- a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java @@ -66,6 +66,8 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; import static 
org.mockito.Mockito.when; public class NestedQueryBuilderTests extends AbstractQueryTestCase { @@ -411,4 +413,22 @@ public void testDisallowExpensiveQueries() { OpenSearchException e = expectThrows(OpenSearchException.class, () -> queryBuilder.toQuery(queryShardContext)); assertEquals("[joining] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", e.getMessage()); } + + public void testSetParentFilterInContext() throws Exception { + QueryShardContext queryShardContext = createShardContext(); + QueryBuilder innerQueryBuilder = spy(new MatchAllQueryBuilderTests().createTestQueryBuilder()); + when(innerQueryBuilder.toQuery(queryShardContext)).thenAnswer(invoke -> { + QueryShardContext context = invoke.getArgument(0); + if (context.getParentFilter() == null) { + throw new Exception("Expect parent filter to be non-null"); + } + return invoke.callRealMethod(); + }); + NestedQueryBuilder nqb = new NestedQueryBuilder("nested1", innerQueryBuilder, RandomPicks.randomFrom(random(), ScoreMode.values())); + + assertNull(queryShardContext.getParentFilter()); + nqb.rewrite(queryShardContext).toQuery(queryShardContext); + assertNull(queryShardContext.getParentFilter()); + verify(innerQueryBuilder).toQuery(queryShardContext); + } } From 797def6e50389d88634b6ca614ceaba407ee983f Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Sat, 30 Sep 2023 18:45:30 -0700 Subject: [PATCH 04/26] Upgrade Lucene to 9.8.0 (#10276) 9.8.0 was officially released this morning: https://lucene.apache.org/core/9_8_0/changes/Changes.html Signed-off-by: Michael Froh --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - libs/core/licenses/lucene-core-9.8.0.jar.sha1 | 1 + .../licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 | 1 + .../lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 | 1 + .../lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 | 1 + .../lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 | 1 + .../lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 | 1 + .../lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 | 1 + .../lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../licenses/lucene-analysis-stempel-9.8.0.jar.sha1 | 1 + .../lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - .../licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 | 1 + .../lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-analysis-common-9.8.0.jar.sha1 | 1 + .../lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 | 1 + server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-core-9.8.0.jar.sha1 | 1 + server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-grouping-9.8.0.jar.sha1 | 1 + .../licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-highlighter-9.8.0.jar.sha1 | 1 + server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-join-9.8.0.jar.sha1 | 1 + 
server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-memory-9.8.0.jar.sha1 | 1 + server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-misc-9.8.0.jar.sha1 | 1 + server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-queries-9.8.0.jar.sha1 | 1 + .../licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-queryparser-9.8.0.jar.sha1 | 1 + server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-sandbox-9.8.0.jar.sha1 | 1 + .../lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 | 1 + .../licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-spatial3d-9.8.0.jar.sha1 | 1 + server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 - server/licenses/lucene-suggest-9.8.0.jar.sha1 | 1 + 48 files changed, 25 insertions(+), 24 deletions(-) delete mode 100644 libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 libs/core/licenses/lucene-core-9.8.0.jar.sha1 delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-core-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.8.0.jar.sha1 delete mode 100644 
server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-join-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.8.0.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.8.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index f6c29d9b64f68..772198d5d0544 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -107,6 +107,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` - Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` - Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) ### Changed - Add instrumentation in rest and network layer. 
([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 71b3e267700b1..dae68940a7b7f 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.8.0-snapshot-95cdd2e +lucene = 9.8.0 bundled_jdk_vendor = adoptium bundled_jdk = 20.0.2+9 diff --git a/libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 b/libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 70baf1270cd5d..0000000000000 --- a/libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d2f7fbc5b2c49ca777a169d579f41082a9a57cc7 \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.8.0.jar.sha1 b/libs/core/licenses/lucene-core-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..f9a3e2f3cbee6 --- /dev/null +++ b/libs/core/licenses/lucene-core-9.8.0.jar.sha1 @@ -0,0 +1 @@ +5e8421c5f8573bcf22e9265fc7e19469545a775a \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c1daa91dd5433..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -57e2b0cca55da8ad856dfd60be42e6daabbc98c3 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..892865a017f48 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 @@ -0,0 +1 @@ +7725476acfcb9bdfeff1b813ce15c39c6b857dc2 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 035b47c5f388c..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0deb3b85eadf831be17b48acab0785fd9d34fc44 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..ef410899981ca --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 @@ -0,0 +1 @@ +7133d34e92770f59eb28686f4d511b9f3f32e970 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 6ff5a433f0a4e..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a204267d68ce4ba36bfddc366cd6865cf5e1378 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..46b83c9e40b3a --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 @@ -0,0 +1 @@ +be44282e1f6b91a0650fcceb558053d6bdd4863d \ No newline at end of file diff --git 
a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index a65ab33a31e2a..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -71e8e811f873ba2b47c7ecf9d890cbeac5b6be41 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..36664695a7818 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 @@ -0,0 +1 @@ +bd1f80d33346f7e588685484ef29a304db5190e4 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 04ab7b7e7adb8..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6e1274273895365bd83391cc4b79f5264479f5de \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..003ccdf8b0727 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 @@ -0,0 +1 @@ +b9ffdc7a52d2087ecb03318ec06305b480cdfe82 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index cef3f97d03c51..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e634c8685edad2bdb5c13748b18c0c1a46bb63a3 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..e22eaa474016f --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 @@ -0,0 +1 @@ +f73e2007b133fb699e517ef13b4952844f0150d8 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 3e2dd19a9dd85..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0afdf2afacbae39414ed06325fbb4bed17c07a7d \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..1ebe42a2a2f56 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 @@ -0,0 +1 @@ +2c09cbc021a8f81a01600a1d2a999361e70f7aed \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file 
mode 100644 index 8c0544acd1ca0..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -166e2ea297182f7bf7070af02aacea9e6a3a19c8 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..3c4523d45c0f5 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 @@ -0,0 +1 @@ +b054f2c7b11fc7c5601b4c3cdf18aa7508612898 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 4ac89f2e792d7..0000000000000 --- a/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8c82be3d997d781bb72d6d0eadade064dd2cd6db \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..6ad304fa52c12 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 @@ -0,0 +1 @@ +36f0363325ca7bf62c180160d1ed5165c7c37795 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 624b5174a444f..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c261d17c681c0d91171c67e192abfef59adea2e \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..f104c4207d390 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 @@ -0,0 +1 @@ +e98fb408028f40170e6d87c16422bfdc0bb2e392 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 70baf1270cd5d..0000000000000 --- a/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d2f7fbc5b2c49ca777a169d579f41082a9a57cc7 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.8.0.jar.sha1 b/server/licenses/lucene-core-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..f9a3e2f3cbee6 --- /dev/null +++ b/server/licenses/lucene-core-9.8.0.jar.sha1 @@ -0,0 +1 @@ +5e8421c5f8573bcf22e9265fc7e19469545a775a \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 20ddb9ae3ef27..0000000000000 --- a/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d1cf3d6db43fad6630376ba59451f848f4d387c \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.8.0.jar.sha1 b/server/licenses/lucene-grouping-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..ab132121b2edc --- /dev/null +++ b/server/licenses/lucene-grouping-9.8.0.jar.sha1 @@ -0,0 +1 @@ +d39184518351178c404ed9669fc6cb6111f2288d \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 
b/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c3ad03ca53b13..0000000000000 --- a/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -83ab97638bb5269f950d75bba5675d3cfb63f2fa \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.8.0.jar.sha1 b/server/licenses/lucene-highlighter-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..c7cb678fb7b72 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.8.0.jar.sha1 @@ -0,0 +1 @@ +1ac38c8278dbd63dfab30744a41dd955a415a31c \ No newline at end of file diff --git a/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c2a4c5334b314..0000000000000 --- a/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -97c26362151908dc892263edda3872abbacb71a8 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.8.0.jar.sha1 b/server/licenses/lucene-join-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..2b6cb8af4faf6 --- /dev/null +++ b/server/licenses/lucene-join-9.8.0.jar.sha1 @@ -0,0 +1 @@ +3d64fc57bb6e718d906413a9f73c713e6d4d8bb0 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 32534d07e47dc..0000000000000 --- a/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8337eddc0dddd0d7dd50c5aa0d17e5e31592f9fa \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.8.0.jar.sha1 b/server/licenses/lucene-memory-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..5fdfee401dd0a --- /dev/null +++ b/server/licenses/lucene-memory-9.8.0.jar.sha1 @@ -0,0 +1 @@ +5283ac71d6ccecb5e00c7b52df2faec012f2625a \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 7db245cc521c7..0000000000000 --- a/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a2e3fae930295f0e2b401effe04eafc25692a414 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.8.0.jar.sha1 b/server/licenses/lucene-misc-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..cf815cba15862 --- /dev/null +++ b/server/licenses/lucene-misc-9.8.0.jar.sha1 @@ -0,0 +1 @@ +9a57b049cf51a5e9c9c1909c420f645f1b6f9a54 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index d01a6d733196e..0000000000000 --- a/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e88d8a464e6cfa345b946c9c8822ba7ee2a9159f \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.8.0.jar.sha1 b/server/licenses/lucene-queries-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..09f369ef18e12 --- /dev/null +++ b/server/licenses/lucene-queries-9.8.0.jar.sha1 @@ -0,0 +1 @@ +628db4ef46f1c6a05145bdac1d1bc4ace6341b13 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c7b9640bad170..0000000000000 --- 
a/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9905790675c01e8dc24f9a5e6b9b28b879c65a52 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.8.0.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..2a42a8956b18b --- /dev/null +++ b/server/licenses/lucene-queryparser-9.8.0.jar.sha1 @@ -0,0 +1 @@ +982faf2bfa55542bf57fbadef54c19ac00f57cae \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c4cd9e47624f8..0000000000000 --- a/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6c8be427ec8ffc7e8233ffbf0d190d95a56cf14 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.8.0.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..64a0b07f72d29 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.8.0.jar.sha1 @@ -0,0 +1 @@ +06493dbd14d02537716822254866a94458f4d842 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index dfee145d3ea26..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -11716d61288feaa692593bf699affa8de2b564c4 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..d1bcb0581435c --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 @@ -0,0 +1 @@ +9d9a731822ad6eefa1ba288a0c158d478522f165 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c7410086ba86c..0000000000000 --- a/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a888e06c0535403b9e58a8dcddeb5e6513a4930 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..d17459cc569a9 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 @@ -0,0 +1 @@ +ce752a52b2d4eac90633c7df7982e29504f99e76 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 6d8d4205f4d02..0000000000000 --- a/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -52dfc8bf135ed29f5baf0a967c1bb63dedb9a069 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.8.0.jar.sha1 b/server/licenses/lucene-suggest-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..ff47b87672d2c --- /dev/null +++ b/server/licenses/lucene-suggest-9.8.0.jar.sha1 @@ -0,0 +1 @@ +f977f96f2093b7fddea6b67caa2e1c5b10edebf6 \ No newline at end of file From 6003560f944d5e63943d13c679797787458f1b5f Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 2 Oct 2023 13:04:38 -0400 Subject: [PATCH 05/26] Bump asm from 9.5 to 9.6 (#10302) Signed-off-by: Andriy Redko --- CHANGELOG.md | 9 +++++---- buildSrc/version.properties | 2 +- 
modules/lang-expression/licenses/asm-9.5.jar.sha1 | 1 - modules/lang-expression/licenses/asm-9.6.jar.sha1 | 1 + .../lang-expression/licenses/asm-commons-9.5.jar.sha1 | 1 - .../lang-expression/licenses/asm-commons-9.6.jar.sha1 | 1 + modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 | 1 - modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 | 1 + modules/lang-painless/licenses/asm-9.5.jar.sha1 | 1 - modules/lang-painless/licenses/asm-9.6.jar.sha1 | 1 + modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 | 1 - modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 | 1 + modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 | 1 - modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 | 1 + modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 | 1 - modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 | 1 + modules/lang-painless/licenses/asm-util-9.5.jar.sha1 | 1 - modules/lang-painless/licenses/asm-util-9.6.jar.sha1 | 1 + 18 files changed, 14 insertions(+), 13 deletions(-) delete mode 100644 modules/lang-expression/licenses/asm-9.5.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-9.6.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-9.5.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-9.6.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-util-9.5.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-util-9.6.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 772198d5d0544..e1834df696769 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -104,10 +104,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.4 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.0 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208)) - Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) -- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 
([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump Lucene from 9.7.0 to 9.8.0 ([#10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) ### Changed - Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index dae68940a7b7f..243a1b2c6f57e 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -16,7 +16,7 @@ icu4j = 70.1 supercsv = 2.4.0 log4j = 2.20.0 slf4j = 1.7.36 -asm = 9.5 +asm = 9.6 jettison = 1.5.4 woodstox = 6.4.0 kotlin = 1.7.10 diff --git a/modules/lang-expression/licenses/asm-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-9.5.jar.sha1 deleted file mode 100644 index ea4aa3581dc87..0000000000000 --- a/modules/lang-expression/licenses/asm-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc6ea1875f4d64fbc85e1691c95b96a3d8569c90 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-9.6.jar.sha1 new file mode 100644 index 0000000000000..2d9e6a9d3cfd6 --- /dev/null +++ b/modules/lang-expression/licenses/asm-9.6.jar.sha1 @@ -0,0 +1 @@ +aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 deleted file mode 100644 index 5be792660c19f..0000000000000 --- a/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19ab5b5800a3910d30d3a3e64fdb00fd0cb42de0 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 new file mode 100644 index 0000000000000..a0814f495771f --- /dev/null +++ b/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 @@ -0,0 +1 @@ +f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 deleted file mode 100644 index fb42db6a9d15c..0000000000000 --- a/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd33c8b6373abaa675be407082fdfda35021254a \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 new file mode 100644 index 0000000000000..101eb03b4b736 --- /dev/null +++ b/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 @@ -0,0 +1 @@ +c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-9.5.jar.sha1 deleted file mode 100644 index ea4aa3581dc87..0000000000000 --- a/modules/lang-painless/licenses/asm-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc6ea1875f4d64fbc85e1691c95b96a3d8569c90 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-9.6.jar.sha1 new file mode 100644 index 0000000000000..2d9e6a9d3cfd6 --- /dev/null +++ b/modules/lang-painless/licenses/asm-9.6.jar.sha1 @@ -0,0 +1 @@ +aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of 
file diff --git a/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 deleted file mode 100644 index 9e87d3ce7d719..0000000000000 --- a/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -490bacc77de7cbc0be1a30bb3471072d705be4a4 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 new file mode 100644 index 0000000000000..fa42ea1198165 --- /dev/null +++ b/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 @@ -0,0 +1 @@ +9ce6c7b174bd997fc2552dff47964546bd7a5ec3 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 deleted file mode 100644 index 5be792660c19f..0000000000000 --- a/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19ab5b5800a3910d30d3a3e64fdb00fd0cb42de0 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 new file mode 100644 index 0000000000000..a0814f495771f --- /dev/null +++ b/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 @@ -0,0 +1 @@ +f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 deleted file mode 100644 index fb42db6a9d15c..0000000000000 --- a/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd33c8b6373abaa675be407082fdfda35021254a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 new file mode 100644 index 0000000000000..101eb03b4b736 --- /dev/null +++ b/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 @@ -0,0 +1 @@ +c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 deleted file mode 100644 index 5fffbfe655deb..0000000000000 --- a/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -64b5a1fc8c1b15ed2efd6a063e976bc8d3dc5ffe \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 new file mode 100644 index 0000000000000..1f42ac62dc69c --- /dev/null +++ b/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 @@ -0,0 +1 @@ +f77caf84eb93786a749b2baa40865b9613e3eaee \ No newline at end of file From fa66bebf88d6fc69765d5ecb26930ece94477024 Mon Sep 17 00:00:00 2001 From: Rishabh Maurya Date: Mon, 2 Oct 2023 13:31:02 -0700 Subject: [PATCH 06/26] Use of LogByteSizeMergePolicy for data stream use cases (#9992) * Configurable merge policy for index * additional setting to configure merge policy for timestamp based index * introduction of logbytesize merge policy as an option Signed-off-by: Rishabh Maurya * remove the trace log not required anymore Signed-off-by: Rishabh Maurya * Refactor the merge policy extraction logic Signed-off-by: Rishabh Maurya * Rename constant DEFAULT to DEFAULT_POLICY Signed-off-by: Rishabh Maurya * Simplify merge policy extraction and selection logic Signed-off-by: Rishabh Maurya * missing javadoc error Signed-off-by: Rishabh Maurya * Renaming log byte size policy setting with 
mb Signed-off-by: Rishabh Maurya * Move validation exception to enum from setting defn Signed-off-by: Rishabh Maurya * rename time_index to time_series_index Signed-off-by: Rishabh Maurya --------- Signed-off-by: Rishabh Maurya --- CHANGELOG.md | 1 + .../gateway/RecoveryFromGatewayIT.java | 4 +- .../RemoveCorruptedShardDataCommandIT.java | 4 +- .../index/store/CorruptedFileIT.java | 10 +- .../indices/stats/IndexStatsIT.java | 10 +- .../java/org/opensearch/update/UpdateIT.java | 4 +- .../common/settings/ClusterSettings.java | 1 + .../common/settings/IndexScopedSettings.java | 29 +- .../org/opensearch/index/IndexSettings.java | 156 ++++++- .../index/LogByteSizeMergePolicyProvider.java | 166 +++++++ .../opensearch/index/MergePolicyProvider.java | 31 ++ ...ig.java => TieredMergePolicyProvider.java} | 84 ++-- .../opensearch/index/shard/IndexShard.java | 2 +- .../segments/IndicesSegmentsRequestTests.java | 4 +- .../index/MergePolicySettingsTests.java | 421 +++++++++++++++--- .../index/MergeSchedulerSettingsTests.java | 8 +- .../RemoveCorruptedShardDataCommandTests.java | 4 +- .../indices/recovery/RecoveryTests.java | 4 +- .../test/OpenSearchIntegTestCase.java | 4 +- 19 files changed, 776 insertions(+), 171 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java create mode 100644 server/src/main/java/org/opensearch/index/MergePolicyProvider.java rename server/src/main/java/org/opensearch/index/{MergePolicyConfig.java => TieredMergePolicyProvider.java} (82%) diff --git a/CHANGELOG.md b/CHANGELOG.md index e1834df696769..7519e242c0acf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) - Implement Visitor Design pattern in QueryBuilder to enable the capability to traverse through the complex QueryBuilder tree. 
([#10110](https://github.com/opensearch-project/OpenSearch/pull/10110)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) +- Configurable merge policy for index with an option to choose from LogByteSize and Tiered merge policy ([#9992](https://github.com/opensearch-project/OpenSearch/pull/9992)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index 2bab61f3e1c4c..229cd7bffad2f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -53,7 +53,7 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.Engine; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.shard.ShardPath; @@ -519,7 +519,7 @@ public void testReuseInFileBasedPeerRecovery() throws Exception { .put("number_of_replicas", 1) // disable merges to keep segments the same - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // expire retention leases quickly .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java index f8c2acbf99f70..b431079476624 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -73,7 +73,7 @@ import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.GatewayMetaState; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.MockEngineFactoryPlugin; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.translog.TestTranslog; @@ -135,7 +135,7 @@ public void testCorruptIndex() throws Exception { Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1") .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "checksum") diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java index 7e1d0792e3ddb..8291fef5d177b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java @@ -72,7 +72,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; -import 
org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; @@ -167,7 +167,7 @@ public void testCorruptFileAndRecover() throws ExecutionException, InterruptedEx Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // no checkindex - we corrupt shards on purpose .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no translog based flush - it might change the .liv / segments.N files @@ -286,7 +286,7 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on // purpose // no translog based flush - it might change the .liv / segments.N files @@ -552,7 +552,7 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // no checkindex - we corrupt shards on purpose .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no translog based flush - it might change the .liv / segments.N files @@ -624,7 +624,7 @@ public void testReplicaCorruption() throws Exception { prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, cluster().numDataNodes() - 1) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on // purpose .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java index a0f01acd1f8e9..0967acb37d3e8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java @@ -66,8 +66,8 @@ import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; import org.opensearch.index.MergeSchedulerConfig; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.VersionType; import org.opensearch.index.cache.query.QueryCacheStats; import org.opensearch.index.engine.VersionConflictEngineException; @@ -589,8 +589,8 @@ public void testNonThrottleStats() throws Exception { prepareCreate("test").setSettings( settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - 
.put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10000") ) @@ -621,8 +621,8 @@ public void testThrottleStats() throws Exception { prepareCreate("test").setSettings( settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "1") .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC.name()) diff --git a/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java index 442268d513fc3..b46d27bafb2a5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java @@ -50,7 +50,7 @@ import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.action.ActionListener; import org.opensearch.core.rest.RestStatus; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.DocumentMissingException; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.plugins.Plugin; @@ -669,7 +669,7 @@ public void run() { public void testStressUpdateDeleteConcurrency() throws Exception { // We create an index with merging disabled so that deletes don't get merged away - assertAcked(prepareCreate("test").setSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false))); + assertAcked(prepareCreate("test").setSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false))); ensureGreen(); Script fieldIncScript = new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, Collections.singletonMap("field", "field")); diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 032027384f106..5261d40387dc6 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -457,6 +457,7 @@ public void apply(Settings value, Settings current, Settings previous) { NetworkService.TCP_CONNECT_TIMEOUT, IndexSettings.QUERY_STRING_ANALYZE_WILDCARD, IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD, + IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY, ScriptService.SCRIPT_GENERAL_CACHE_SIZE_SETTING, ScriptService.SCRIPT_GENERAL_CACHE_EXPIRE_SETTING, ScriptService.SCRIPT_GENERAL_MAX_COMPILATIONS_RATE_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 
5b2afc44600bd..83bf8c82ee3dd 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -45,9 +45,11 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexSortConfig; import org.opensearch.index.IndexingSlowLog; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.LogByteSizeMergePolicyProvider; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.MergeSchedulerConfig; import org.opensearch.index.SearchSlowLog; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.cache.bitset.BitsetFilterCache; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.fielddata.IndexFieldDataService; @@ -120,14 +122,14 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING, - MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, IndexSortConfig.INDEX_SORT_FIELD_SETTING, IndexSortConfig.INDEX_SORT_ORDER_SETTING, IndexSortConfig.INDEX_SORT_MISSING_SETTING, @@ -202,6 +204,13 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED, IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME, IndexSettings.INDEX_MERGE_ON_FLUSH_POLICY, + IndexSettings.INDEX_MERGE_POLICY, + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING, IndexSettings.DEFAULT_SEARCH_PIPELINE, // Settings for Searchable Snapshots @@ -275,7 +284,7 @@ public boolean isPrivateSetting(String key) { case IndexMetadata.SETTING_HISTORY_UUID: case IndexMetadata.SETTING_VERSION_UPGRADED: case IndexMetadata.SETTING_INDEX_PROVIDED_NAME: - case MergePolicyConfig.INDEX_MERGE_ENABLED: + case MergePolicyProvider.INDEX_MERGE_ENABLED: // we keep the shrink settings for BWC - this can be removed in 8.0 // we can't remove in 7 since this setting 
might be baked into an index coming in via a full cluster restart from 6.0 case "index.shrink.source.uuid": diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 1e4224c314f05..ce6c1a5ad6284 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -54,6 +54,7 @@ import org.opensearch.node.Node; import org.opensearch.search.pipeline.SearchPipelineService; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -83,9 +84,42 @@ */ @PublicApi(since = "1.0.0") public final class IndexSettings { - private static final String MERGE_ON_FLUSH_DEFAULT_POLICY = "default"; + private static final String DEFAULT_POLICY = "default"; private static final String MERGE_ON_FLUSH_MERGE_POLICY = "merge-on-flush"; + /** + * Enum representing supported merge policies + */ + public enum IndexMergePolicy { + TIERED("tiered"), + LOG_BYTE_SIZE("log_byte_size"), + DEFAULT_POLICY(IndexSettings.DEFAULT_POLICY); + + private final String value; + + IndexMergePolicy(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + public static IndexMergePolicy fromString(String text) { + for (IndexMergePolicy policy : IndexMergePolicy.values()) { + if (policy.value.equals(text)) { + return policy; + } + } + throw new IllegalArgumentException( + "The setting has unsupported policy specified: " + + text + + ". Please use one of: " + + String.join(", ", Arrays.stream(IndexMergePolicy.values()).map(IndexMergePolicy::getValue).toArray(String[]::new)) + ); + } + } + public static final Setting> DEFAULT_FIELD_SETTING = Setting.listSetting( "index.query.default_field", Collections.singletonList("*"), @@ -566,11 +600,25 @@ public final class IndexSettings { public static final Setting INDEX_MERGE_ON_FLUSH_POLICY = Setting.simpleString( "index.merge_on_flush.policy", - MERGE_ON_FLUSH_DEFAULT_POLICY, + DEFAULT_POLICY, Property.IndexScope, Property.Dynamic ); + public static final Setting INDEX_MERGE_POLICY = Setting.simpleString( + "index.merge.policy", + DEFAULT_POLICY, + IndexMergePolicy::fromString, + Property.IndexScope + ); + + public static final Setting TIME_SERIES_INDEX_MERGE_POLICY = Setting.simpleString( + "indices.time_series_index.default_index_merge_policy", + DEFAULT_POLICY, + IndexMergePolicy::fromString, + Property.NodeScope + ); + public static final Setting SEARCHABLE_SNAPSHOT_REPOSITORY = Setting.simpleString( "index.searchable_snapshot.repository", Property.IndexScope, @@ -651,7 +699,8 @@ public final class IndexSettings { private volatile ByteSizeValue generationThresholdSize; private volatile ByteSizeValue flushAfterMergeThresholdSize; private final MergeSchedulerConfig mergeSchedulerConfig; - private final MergePolicyConfig mergePolicyConfig; + private final TieredMergePolicyProvider tieredMergePolicyProvider; + private final LogByteSizeMergePolicyProvider logByteSizeMergePolicyProvider; private final IndexSortConfig indexSortConfig; private final IndexScopedSettings scopedSettings; private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis(); @@ -844,7 +893,8 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti maxAnalyzedOffset = scopedSettings.get(MAX_ANALYZED_OFFSET_SETTING); maxTermsCount = scopedSettings.get(MAX_TERMS_COUNT_SETTING); maxRegexLength = scopedSettings.get(MAX_REGEX_LENGTH_SETTING); - this.mergePolicyConfig 
= new MergePolicyConfig(logger, this); + this.tieredMergePolicyProvider = new TieredMergePolicyProvider(logger, this); + this.logByteSizeMergePolicyProvider = new LogByteSizeMergePolicyProvider(logger, this); this.indexSortConfig = new IndexSortConfig(this); searchIdleAfter = scopedSettings.get(INDEX_SEARCH_IDLE_AFTER); defaultPipeline = scopedSettings.get(DEFAULT_PIPELINE); @@ -866,33 +916,59 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti * Now this sortField (IndexSort) is stored in SegmentInfo and we need to maintain backward compatibility for them. */ widenIndexSortType = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).before(V_2_7_0); - - scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, - mergePolicyConfig::setDeletesPctAllowed + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING, + tieredMergePolicyProvider::setNoCFSRatio ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, - mergePolicyConfig::setExpungeDeletesAllowed + TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, + tieredMergePolicyProvider::setDeletesPctAllowed ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, - mergePolicyConfig::setFloorSegmentSetting + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, + tieredMergePolicyProvider::setExpungeDeletesAllowed ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, - mergePolicyConfig::setMaxMergesAtOnce + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, + tieredMergePolicyProvider::setFloorSegmentSetting ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, - mergePolicyConfig::setMaxMergedSegment + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, + tieredMergePolicyProvider::setMaxMergesAtOnce ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, - mergePolicyConfig::setSegmentsPerTier + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, + tieredMergePolicyProvider::setMaxMergedSegment + ); + scopedSettings.addSettingsUpdateConsumer( + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, + tieredMergePolicyProvider::setSegmentsPerTier ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING, + logByteSizeMergePolicyProvider::setLBSMergeFactor + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING, + logByteSizeMergePolicyProvider::setLBSMinMergedMB + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeSegment + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeMBForForcedMerge + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeDocs + ); + 
scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING, + logByteSizeMergePolicyProvider::setLBSNoCFSRatio + ); scopedSettings.addSettingsUpdateConsumer( MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING, @@ -1439,9 +1515,43 @@ public long getGcDeletesInMillis() { /** * Returns the merge policy that should be used for this index. - */ - public MergePolicy getMergePolicy() { - return mergePolicyConfig.getMergePolicy(); + * @param isTimeSeriesIndex true if index contains @timestamp field + */ + public MergePolicy getMergePolicy(boolean isTimeSeriesIndex) { + String indexScopedPolicy = scopedSettings.get(INDEX_MERGE_POLICY); + MergePolicyProvider mergePolicyProvider = null; + IndexMergePolicy indexMergePolicy = IndexMergePolicy.fromString(indexScopedPolicy); + switch (indexMergePolicy) { + case TIERED: + mergePolicyProvider = tieredMergePolicyProvider; + break; + case LOG_BYTE_SIZE: + mergePolicyProvider = logByteSizeMergePolicyProvider; + break; + case DEFAULT_POLICY: + if (isTimeSeriesIndex) { + String nodeScopedTimeSeriesIndexPolicy = TIME_SERIES_INDEX_MERGE_POLICY.get(nodeSettings); + IndexMergePolicy nodeMergePolicy = IndexMergePolicy.fromString(nodeScopedTimeSeriesIndexPolicy); + switch (nodeMergePolicy) { + case TIERED: + case DEFAULT_POLICY: + mergePolicyProvider = tieredMergePolicyProvider; + break; + case LOG_BYTE_SIZE: + mergePolicyProvider = logByteSizeMergePolicyProvider; + break; + } + } else { + mergePolicyProvider = tieredMergePolicyProvider; + } + break; + } + assert mergePolicyProvider != null : "should not happen as validation for invalid merge policy values " + + "are part of setting definition"; + if (logger.isTraceEnabled()) { + logger.trace("Index: " + this.index.getName() + ", Merge policy used: " + mergePolicyProvider); + } + return mergePolicyProvider.getMergePolicy(); } public T getValue(Setting setting) { @@ -1632,7 +1742,7 @@ public boolean isMergeOnFlushEnabled() { } private void setMergeOnFlushPolicy(String policy) { - if (Strings.isEmpty(policy) || MERGE_ON_FLUSH_DEFAULT_POLICY.equalsIgnoreCase(policy)) { + if (Strings.isEmpty(policy) || DEFAULT_POLICY.equalsIgnoreCase(policy)) { mergeOnFlushPolicy = null; } else if (MERGE_ON_FLUSH_MERGE_POLICY.equalsIgnoreCase(policy)) { this.mergeOnFlushPolicy = MergeOnFlushMergePolicy::new; @@ -1643,7 +1753,7 @@ private void setMergeOnFlushPolicy(String policy) { + " has unsupported policy specified: " + policy + ". Please use one of: " - + MERGE_ON_FLUSH_DEFAULT_POLICY + + DEFAULT_POLICY + ", " + MERGE_ON_FLUSH_MERGE_POLICY ); diff --git a/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java b/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java new file mode 100644 index 0000000000000..0b762d781957c --- /dev/null +++ b/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java @@ -0,0 +1,166 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.LogByteSizeMergePolicy; +import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.NoMergePolicy; +import org.opensearch.common.settings.Setting; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; + +import static org.apache.lucene.index.LogMergePolicy.DEFAULT_MAX_MERGE_DOCS; +import static org.apache.lucene.index.LogMergePolicy.DEFAULT_NO_CFS_RATIO; + +/** + *

+ * The LogByteSizeMergePolicy is an alternative merge policy primarily used here to optimize the merging of segments in scenarios + * involving indexes with timestamps. + * While the TieredMergePolicy is the default choice, the LogByteSizeMergePolicy can be configured + * as the default merge policy for time-series index data using the index.merge.policy index setting or the + * indices.time_series_index.default_index_merge_policy node setting. + * + *
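+ * <p>
+ * For illustration only (an editor's sketch, not part of this change), an index opts into a policy through the
+ * {@code index.merge.policy} setting defined in {@code IndexSettings}:
+ * <pre>{@code
+ * Settings settings = Settings.builder()
+ *     .put("index.merge.policy", "log_byte_size") // accepted values: "tiered", "log_byte_size", "default"
+ *     .build();
+ * }</pre>
+ *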

+ * Unlike the TieredMergePolicy, which prioritizes merging segments of equal sizes, the LogByteSizeMergePolicy + * specializes in merging adjacent segments efficiently. + * This characteristic makes it particularly well-suited for range queries on time-series index data. + * Adjacent segments in time-series index data typically contain documents with similar timestamps. + * When these segments are merged, the resulting segment covers a range of timestamps with reduced overlap compared + * to the adjacent segments. This reduced overlap persists even as segments grow older and larger, + * which can significantly benefit range queries on timestamps. + * + *
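+ * <p>
+ * A hypothetical illustration (editor's note, with invented numbers): adjacent segments covering the timestamp
+ * ranges [00:00-00:10], [00:10-00:20] and [00:20-00:30] merge into a single segment covering [00:00-00:30], so a
+ * range query over [00:05-00:15] visits one segment instead of two, and the merged segment still has tight
+ * timestamp bounds.
+ *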

+ * In contrast, the TieredMergePolicy does not honor this timestamp range optimization. It focuses on merging segments + * of equal sizes and does not consider adjacency. Consequently, as segments grow older and larger, + * the overlap of timestamp ranges among adjacent segments managed by TieredMergePolicy can increase. + * This can lead to inefficiencies in range queries on timestamps, as the number of segments to be scanned + * within a given timestamp range could become high. + * + * @opensearch.internal + */ +public class LogByteSizeMergePolicyProvider implements MergePolicyProvider { + private final LogByteSizeMergePolicy logByteSizeMergePolicy = new LogByteSizeMergePolicy(); + + private final Logger logger; + private final boolean mergesEnabled; + + public static final ByteSizeValue DEFAULT_MIN_MERGE = new ByteSizeValue(2, ByteSizeUnit.MB); + public static final int DEFAULT_MERGE_FACTOR = 10; + + public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); + + public static final ByteSizeValue DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE = new ByteSizeValue(Long.MAX_VALUE); + + public static final Setting INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING = Setting.intSetting( + "index.merge.log_byte_size_policy.merge_factor", + DEFAULT_MERGE_FACTOR, // keeping it same as default max merge at once for tiered merge policy + 2, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.min_merge", + DEFAULT_MIN_MERGE, // keeping it same as default floor segment for tiered merge policy + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MAX_MERGE_SEGMENT_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.max_merge_segment", + DEFAULT_MAX_MERGED_SEGMENT, // keeping default same as tiered merge policy + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.max_merge_segment_forced_merge", + DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MAX_MERGED_DOCS_SETTING = Setting.intSetting( + "index.merge.log_byte_size_policy.max_merged_docs", + DEFAULT_MAX_MERGE_DOCS, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_NO_CFS_RATIO_SETTING = new Setting<>( + "index.merge.log_byte_size_policy.no_cfs_ratio", + Double.toString(DEFAULT_NO_CFS_RATIO), + TieredMergePolicyProvider::parseNoCFSRatio, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + LogByteSizeMergePolicyProvider(Logger logger, IndexSettings indexSettings) { + this.logger = logger; + this.mergesEnabled = indexSettings.getSettings().getAsBoolean(INDEX_MERGE_ENABLED, true); + + // Undocumented settings, works great with defaults + logByteSizeMergePolicy.setMergeFactor(indexSettings.getValue(INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING)); + logByteSizeMergePolicy.setMinMergeMB(indexSettings.getValue(INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING).getMbFrac()); + logByteSizeMergePolicy.setMaxMergeMB(indexSettings.getValue(INDEX_LBS_MAX_MERGE_SEGMENT_SETTING).getMbFrac()); + logByteSizeMergePolicy.setMaxMergeMBForForcedMerge( + 
indexSettings.getValue(INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING).getMbFrac() + ); + logByteSizeMergePolicy.setMaxMergeDocs(indexSettings.getValue(INDEX_LBS_MAX_MERGED_DOCS_SETTING)); + logByteSizeMergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_LBS_NO_CFS_RATIO_SETTING)); + } + + @Override + public MergePolicy getMergePolicy() { + return mergesEnabled ? logByteSizeMergePolicy : NoMergePolicy.INSTANCE; + } + + void setLBSMergeFactor(int mergeFactor) { + logByteSizeMergePolicy.setMergeFactor(mergeFactor); + } + + void setLBSMaxMergeSegment(ByteSizeValue maxMergeSegment) { + logByteSizeMergePolicy.setMaxMergeMB(maxMergeSegment.getMbFrac()); + } + + void setLBSMinMergedMB(ByteSizeValue minMergedSize) { + logByteSizeMergePolicy.setMinMergeMB(minMergedSize.getMbFrac()); + } + + void setLBSMaxMergeMBForForcedMerge(ByteSizeValue maxMergeForcedMerge) { + logByteSizeMergePolicy.setMaxMergeMBForForcedMerge(maxMergeForcedMerge.getMbFrac()); + } + + void setLBSMaxMergeDocs(int maxMergeDocs) { + logByteSizeMergePolicy.setMaxMergeDocs(maxMergeDocs); + } + + void setLBSNoCFSRatio(Double noCFSRatio) { + logByteSizeMergePolicy.setNoCFSRatio(noCFSRatio); + } + + @Override + public String toString() { + return "LogByteSizeMergePolicyProvider{" + + "mergeFactor=" + + logByteSizeMergePolicy.getMergeFactor() + + ", minMergeMB=" + + logByteSizeMergePolicy.getMinMergeMB() + + ", maxMergeMB=" + + logByteSizeMergePolicy.getMaxMergeMB() + + ", maxMergeMBForForcedMerge=" + + logByteSizeMergePolicy.getMaxMergeMBForForcedMerge() + + ", maxMergedDocs=" + + logByteSizeMergePolicy.getMaxMergeDocs() + + ", noCFSRatio=" + + logByteSizeMergePolicy.getNoCFSRatio() + + '}'; + } + +} diff --git a/server/src/main/java/org/opensearch/index/MergePolicyProvider.java b/server/src/main/java/org/opensearch/index/MergePolicyProvider.java new file mode 100644 index 0000000000000..6f734314f758f --- /dev/null +++ b/server/src/main/java/org/opensearch/index/MergePolicyProvider.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.apache.lucene.index.MergePolicy; +import org.opensearch.common.annotation.InternalApi; + +/** + * A provider for obtaining merge policies used by OpenSearch indexes. + * + * @opensearch.internal + */ + +@InternalApi +public interface MergePolicyProvider { + // don't convert to Setting<> and register... we only set this in tests and register via a plugin + String INDEX_MERGE_ENABLED = "index.merge.enabled"; + + /** + * Gets the merge policy to be used for index. + * + * @return The merge policy instance. 
+ */ + MergePolicy getMergePolicy(); +} diff --git a/server/src/main/java/org/opensearch/index/MergePolicyConfig.java b/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java similarity index 82% rename from server/src/main/java/org/opensearch/index/MergePolicyConfig.java rename to server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java index fe2af21dfe039..d5d354c6c960a 100644 --- a/server/src/main/java/org/opensearch/index/MergePolicyConfig.java +++ b/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java @@ -33,6 +33,7 @@ package org.opensearch.index; import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.TieredMergePolicy; @@ -47,9 +48,12 @@ * where the index data is stored, and are immutable up to delete markers. * Segments are, periodically, merged into larger segments to keep the * index size at bay and expunge deletes. + * This class customizes and exposes 2 merge policies from lucene - + * {@link LogByteSizeMergePolicy} and {@link TieredMergePolicy}. + * * *
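+ * <p>
+ * A minimal usage sketch (editor's illustration; it assumes same-package access, since the constructor is
+ * package-private, and is not part of this change):
+ * <pre>{@code
+ * MergePolicyProvider provider = new TieredMergePolicyProvider(logger, indexSettings);
+ * MergePolicy policy = provider.getMergePolicy(); // NoMergePolicy.INSTANCE when "index.merge.enabled" is false
+ * }</pre>
+ *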

- * Merges select segments of approximately equal size, subject to an allowed + * Tiered merge policy select segments of approximately equal size, subject to an allowed * number of segments per tier. The merge policy is able to merge * non-adjacent segments, and separates how many segments are merged at once from how many * segments are allowed per tier. It also does not over-merge (i.e., cascade merges). @@ -125,8 +129,9 @@ * @opensearch.internal */ -public final class MergePolicyConfig { - private final OpenSearchTieredMergePolicy mergePolicy = new OpenSearchTieredMergePolicy(); +public final class TieredMergePolicyProvider implements MergePolicyProvider { + private final OpenSearchTieredMergePolicy tieredMergePolicy = new OpenSearchTieredMergePolicy(); + private final Logger logger; private final boolean mergesEnabled; @@ -137,10 +142,11 @@ public final class MergePolicyConfig { public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d; public static final double DEFAULT_DELETES_PCT_ALLOWED = 20.0d; + public static final Setting INDEX_COMPOUND_FORMAT_SETTING = new Setting<>( "index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), - MergePolicyConfig::parseNoCFSRatio, + TieredMergePolicyProvider::parseNoCFSRatio, Property.Dynamic, Property.IndexScope ); @@ -194,10 +200,8 @@ public final class MergePolicyConfig { Property.Dynamic, Property.IndexScope ); - // don't convert to Setting<> and register... we only set this in tests and register via a plugin - public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; - MergePolicyConfig(Logger logger, IndexSettings indexSettings) { + TieredMergePolicyProvider(Logger logger, IndexSettings indexSettings) { this.logger = logger; double forceMergeDeletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); // percentage ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING); @@ -216,54 +220,41 @@ public final class MergePolicyConfig { ); } maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier); - mergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING)); - mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed); - mergePolicy.setFloorSegmentMB(floorSegment.getMbFrac()); - mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); - mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); - mergePolicy.setSegmentsPerTier(segmentsPerTier); - mergePolicy.setDeletesPctAllowed(deletesPctAllowed); - if (logger.isTraceEnabled()) { - logger.trace( - "using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}]," - + " max_merge_at_once[{}], max_merged_segment[{}], segments_per_tier[{}]," - + " deletes_pct_allowed[{}]", - forceMergeDeletesPctAllowed, - floorSegment, - maxMergeAtOnce, - maxMergedSegment, - segmentsPerTier, - deletesPctAllowed - ); - } + tieredMergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING)); + tieredMergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed); + tieredMergePolicy.setFloorSegmentMB(floorSegment.getMbFrac()); + tieredMergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); + tieredMergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); + tieredMergePolicy.setSegmentsPerTier(segmentsPerTier); + tieredMergePolicy.setDeletesPctAllowed(deletesPctAllowed); } void setSegmentsPerTier(Double segmentsPerTier) { - 
mergePolicy.setSegmentsPerTier(segmentsPerTier); + tieredMergePolicy.setSegmentsPerTier(segmentsPerTier); } void setMaxMergedSegment(ByteSizeValue maxMergedSegment) { - mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); + tieredMergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); } void setMaxMergesAtOnce(Integer maxMergeAtOnce) { - mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); + tieredMergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); } void setFloorSegmentSetting(ByteSizeValue floorSegementSetting) { - mergePolicy.setFloorSegmentMB(floorSegementSetting.getMbFrac()); + tieredMergePolicy.setFloorSegmentMB(floorSegementSetting.getMbFrac()); } void setExpungeDeletesAllowed(Double value) { - mergePolicy.setForceMergeDeletesPctAllowed(value); + tieredMergePolicy.setForceMergeDeletesPctAllowed(value); } void setNoCFSRatio(Double noCFSRatio) { - mergePolicy.setNoCFSRatio(noCFSRatio); + tieredMergePolicy.setNoCFSRatio(noCFSRatio); } void setDeletesPctAllowed(Double deletesPctAllowed) { - mergePolicy.setDeletesPctAllowed(deletesPctAllowed); + tieredMergePolicy.setDeletesPctAllowed(deletesPctAllowed); } private int adjustMaxMergeAtOnceIfNeeded(int maxMergeAtOnce, double segmentsPerTier) { @@ -285,11 +276,11 @@ private int adjustMaxMergeAtOnceIfNeeded(int maxMergeAtOnce, double segmentsPerT return maxMergeAtOnce; } - MergePolicy getMergePolicy() { - return mergesEnabled ? mergePolicy : NoMergePolicy.INSTANCE; + public MergePolicy getMergePolicy() { + return mergesEnabled ? tieredMergePolicy : NoMergePolicy.INSTANCE; } - private static double parseNoCFSRatio(String noCFSRatio) { + public static double parseNoCFSRatio(String noCFSRatio) { noCFSRatio = noCFSRatio.trim(); if (noCFSRatio.equalsIgnoreCase("true")) { return 1.0d; @@ -310,4 +301,23 @@ private static double parseNoCFSRatio(String noCFSRatio) { } } } + + @Override + public String toString() { + return "TieredMergePolicyProvider{" + + "expungeDeletesAllowed=" + + tieredMergePolicy.getForceMergeDeletesPctAllowed() + + ", floorSegment=" + + tieredMergePolicy.getFloorSegmentMB() + + ", maxMergeAtOnce=" + + tieredMergePolicy.getMaxMergeAtOnce() + + ", maxMergedSegment=" + + tieredMergePolicy.getMaxMergedSegmentMB() + + ", segmentsPerTier=" + + tieredMergePolicy.getSegmentsPerTier() + + ", deletesPctAllowed=" + + tieredMergePolicy.getDeletesPctAllowed() + + '}'; + } + } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index d476e8b7c9288..5ce066b156775 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -3772,7 +3772,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro indexSettings, warmer, store, - indexSettings.getMergePolicy(), + indexSettings.getMergePolicy(isTimeSeriesIndex), mapperService != null ? 
mapperService.indexAnalyzer() : null, similarityService.similarity(mapperService), engineConfigFactory.newCodecServiceOrDefault(indexSettings, mapperService, logger, codecService), diff --git a/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java index 67846efab2af8..d35c821b41aa0 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java @@ -34,7 +34,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.settings.Settings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.indices.IndexClosedException; import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalSettingsPlugin; @@ -56,7 +56,7 @@ protected Collection> getPlugins() { public void setupIndex() { Settings settings = Settings.builder() // don't allow any merges so that the num docs is the expected segments - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .build(); createIndex("test", settings); diff --git a/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java b/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java index 387997892ee30..32c4c048d77ba 100644 --- a/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java +++ b/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java @@ -31,6 +31,7 @@ package org.opensearch.index; +import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; @@ -49,17 +50,17 @@ public class MergePolicySettingsTests extends OpenSearchTestCase { protected final ShardId shardId = new ShardId("index", "_na_", 1); public void testCompoundFileSettings() throws IOException { - assertThat(new MergePolicyConfig(logger, indexSettings(Settings.EMPTY)).getMergePolicy().getNoCFSRatio(), equalTo(0.1)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(true))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0.5))).getMergePolicy().getNoCFSRatio(), equalTo(0.5)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(1.0))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0.0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, 
indexSettings(Settings.EMPTY)).getMergePolicy().getNoCFSRatio(), equalTo(0.1)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(true))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0.5))).getMergePolicy().getNoCFSRatio(), equalTo(0.5)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(1.0))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0.0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); } private static IndexSettings indexSettings(Settings settings) { @@ -67,33 +68,197 @@ private static IndexSettings indexSettings(Settings settings) { } public void testNoMerges() { - MergePolicyConfig mp = new MergePolicyConfig( + TieredMergePolicyProvider tmp = new TieredMergePolicyProvider( logger, - indexSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build()) + indexSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false).build()) ); - assertTrue(mp.getMergePolicy() instanceof NoMergePolicy); + LogByteSizeMergePolicyProvider lbsmp = new LogByteSizeMergePolicyProvider( + logger, + indexSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false).build()) + ); + assertTrue(tmp.getMergePolicy() instanceof NoMergePolicy); + assertTrue(lbsmp.getMergePolicy() instanceof NoMergePolicy); } public void testUpdateSettings() throws IOException { - IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); - assertThat(indexSettings.getMergePolicy().getNoCFSRatio(), equalTo(0.1)); + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + IndexSettings indexSettings = indexSettings(settings); + assertThat(indexSettings.getMergePolicy(false).getNoCFSRatio(), equalTo(0.1)); indexSettings = indexSettings(build(0.9)); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.9)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.9)); indexSettings.updateIndexMetadata(newIndexMeta("index", build(0.1))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.1)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.1)); indexSettings.updateIndexMetadata(newIndexMeta("index", build(0.0))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.0)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.0)); indexSettings.updateIndexMetadata(newIndexMeta("index", build("true"))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(1.0)); + 
assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(1.0)); indexSettings.updateIndexMetadata(newIndexMeta("index", build("false"))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.0)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.0)); + } + + public void testDefaultMergePolicy() throws IOException { + IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + } + + public void testMergePolicyPrecedence() throws IOException { + // 1. INDEX_MERGE_POLICY is not set + // assert defaults + IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 1.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as log_byte_size + // assert index policy is tiered whereas time series index policy is log_byte_size + Settings nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build(); + indexSettings = new IndexSettings(newIndexMeta("test", Settings.EMPTY), nodeSettings); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + // 1.2 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as tiered + // assert both index and time series index policy is tiered + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + indexSettings = new IndexSettings(newIndexMeta("test", Settings.EMPTY), nodeSettings); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 2. INDEX_MERGE_POLICY set as tiered + // assert both index and time-series-index merge policy is set as tiered + indexSettings = indexSettings( + Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()).build() + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 2.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as log_byte_size + // assert both index and time-series-index merge policy is set as tiered + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build(); + indexSettings = new IndexSettings( + newIndexMeta( + "test", + Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()).build() + ), + nodeSettings + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 3. 
INDEX_MERGE_POLICY set as log_byte_size + // assert both index and time-series-index merge policy is set as log_byte_size + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof LogByteSizeMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + // 3.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as tiered + // assert both index and time-series-index merge policy is set as log_byte_size + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + indexSettings = new IndexSettings( + newIndexMeta( + "test", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ), + nodeSettings + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof LogByteSizeMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + } + + public void testInvalidMergePolicy() throws IOException { + + final Settings invalidSettings = Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "invalid").build(); + IllegalArgumentException exc1 = expectThrows( + IllegalArgumentException.class, + () -> IndexSettings.INDEX_MERGE_POLICY.get(invalidSettings) + ); + assertThat(exc1.getMessage(), containsString(" has unsupported policy specified: ")); + IllegalArgumentException exc2 = expectThrows( + IllegalArgumentException.class, + () -> indexSettings(invalidSettings).getMergePolicy(false) + ); + assertThat(exc2.getMessage(), containsString(" has unsupported policy specified: ")); + + final Settings invalidSettings2 = Settings.builder().put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), "invalid").build(); + IllegalArgumentException exc3 = expectThrows( + IllegalArgumentException.class, + () -> IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.get(invalidSettings2) + ); + assertThat(exc3.getMessage(), containsString(" has unsupported policy specified: ")); + + IllegalArgumentException exc4 = expectThrows( + IllegalArgumentException.class, + () -> new IndexSettings(newIndexMeta("test", Settings.EMPTY), invalidSettings2).getMergePolicy(true) + ); + assertThat(exc4.getMessage(), containsString(" has unsupported policy specified: ")); + } + + public void testUpdateSettingsForLogByteSizeMergePolicy() throws IOException { + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + assertThat(indexSettings.getMergePolicy(true).getNoCFSRatio(), equalTo(0.1)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.9) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.9)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.1) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), 
equalTo(0.1)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.0) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.0)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), "true") + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(1.0)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), "false") + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.0)); } public void testTieredMergePolicySettingsUpdate() throws IOException { IndexSettings indexSettings = indexSettings(Settings.EMPTY); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d ); @@ -102,21 +267,21 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0 ); indexSettings.updateIndexMetadata( @@ -124,41 +289,41 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB) + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB) ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - 
MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE ); indexSettings.updateIndexMetadata( newIndexMeta( "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1 + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE - 1 ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1 + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE - 1 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001 ); indexSettings.updateIndexMetadata( @@ -166,21 +331,21 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1) + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1) ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER, 0 ); indexSettings.updateIndexMetadata( @@ -188,37 +353,37 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1 + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER + 1 ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER + 1, 0 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_DELETES_PCT_ALLOWED, 0 ); indexSettings.updateIndexMetadata( newIndexMeta( "index", - 
Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build() + Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build() ) ); - assertEquals(((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), 22, 0); + assertEquals(((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), 22, 0); IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, () -> indexSettings.updateIndexMetadata( newIndexMeta( "index", - Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build() + Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build() ) ) ); @@ -226,50 +391,162 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { assertThat(cause.getMessage(), containsString("must be <= 50.0")); indexSettings.updateIndexMetadata(newIndexMeta("index", EMPTY_SETTINGS)); // see if defaults are restored assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER, 0 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_DELETES_PCT_ALLOWED, 0 ); } + public void testLogByteSizeMergePolicySettingsUpdate() throws IOException { + + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMergeFactor(), + 
LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey(), + LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + 1 + ) + .build() + ) + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMergeFactor(), + LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + 1 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING.getKey(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE.getMb() + 1, ByteSizeUnit.MB) + ) + .build() + ) + ); + + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMinMergeMB(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING.getKey(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMb() + 100, ByteSizeUnit.MB) + ) + .build() + ) + ); + + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMb() + 100, ByteSizeUnit.MB).getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING.getKey(), + new ByteSizeValue( + LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE.getMb() - 100, + ByteSizeUnit.MB + ) + ) + .build() + ) + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeMBForForcedMerge(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE.getMb() - 100, ByteSizeUnit.MB) + .getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING.getKey(), 10000000) + .build() + ) + ); + assertEquals(((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeDocs(), 10000000); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.1) + .build() + ) + ); + assertEquals(indexSettings.getMergePolicy(true).getNoCFSRatio(), 0.1, 0.0); + } + public Settings build(String value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(double value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return 
Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(int value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(boolean value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } } diff --git a/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java b/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java index 2443ee1ab40be..baaf584702f78 100644 --- a/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java @@ -92,8 +92,8 @@ public void testUpdateAutoThrottleSettings() throws Exception { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "2") .put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), "true"); @@ -123,8 +123,8 @@ public void testUpdateMergeMaxThreadCount() throws Exception { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "10000") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10000"); IndexSettings settings = new IndexSettings(newIndexMeta("index", builder.build()), Settings.EMPTY); diff --git a/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java index 9c8f9896850c6..c88c86d51be08 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -58,7 +58,7 @@ import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.PersistedClusterStateService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.EngineConfigFactory; import org.opensearch.index.engine.EngineCreationFailureException; import org.opensearch.index.engine.InternalEngineFactory; @@ -134,7 +134,7 @@ public void setup() 
throws IOException { final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_INDEX_UUID, shardId.getIndex().getUUID()) .build(); diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index 11d916616578d..ad90255a3cc3f 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -54,7 +54,7 @@ import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.VersionType; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.Engine; @@ -168,7 +168,7 @@ public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10) // If soft-deletes is enabled, delete#1 will be reclaimed because its segment (segment_1) is fully deleted // index#0 will be retained if merge is disabled; otherwise it will be reclaimed because gcp=3 and retained_ops=0 - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .build(); try (ReplicationGroup shards = createGroup(1, settings)) { shards.startAll(); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 6e064f943ca07..0b80c6e577f95 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -130,9 +130,9 @@ import org.opensearch.http.HttpInfo; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; import org.opensearch.index.MergeSchedulerConfig; import org.opensearch.index.MockEngineFactoryPlugin; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.codec.CodecService; import org.opensearch.index.engine.Segment; import org.opensearch.index.mapper.CompletionFieldMapper; @@ -500,7 +500,7 @@ protected Settings.Builder setRandomIndexSettings(Random random, Settings.Builde private static Settings.Builder setRandomIndexMergeSettings(Random random, Settings.Builder builder) { if (random.nextBoolean()) { builder.put( - MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), (random.nextBoolean() ? random.nextDouble() : random.nextBoolean()).toString() ); } From d3bf230fe820f20121f3be35ca42b97cf804fea1 Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Mon, 2 Oct 2023 13:47:48 -0700 Subject: [PATCH 07/26] Simplify initialization of Settings (#10194) There was a weird circular class-loading dependency between Settings and Settings.Builder that could result in ClassLoader deadlock if multiple threads trigger class-loading of Settings at the same time. 
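To make the failure mode concrete, the cycle boils down to this self-contained sketch (the class names mirror the real ones, but ClinitDeadlockDemo and the class bodies are simplified, hypothetical stand-ins rather than the actual OpenSearch code):

    // Two classes whose static initializers depend on each other. Per JLS 12.4.2
    // each class has its own initialization lock, so a concurrent first use of
    // the two classes can deadlock inside class initialization.
    public final class ClinitDeadlockDemo {

        static final class Settings {
            // Settings.<clinit> must first initialize Builder to run this line.
            static final Settings EMPTY = new Builder().build();
        }

        static final class Builder {
            // Builder.<clinit> calls build(), which constructs a Settings
            // instance and therefore needs Settings to be initialized as well.
            static final Settings EMPTY_SETTINGS = new Builder().build();

            Settings build() {
                return new Settings();
            }
        }

        public static void main(String[] args) {
            // Thread A takes Settings' initialization lock and waits for Builder's;
            // thread B takes Builder's initialization lock and waits for Settings'.
            // The JVM does not detect this cycle, so the process can simply hang.
            new Thread(() -> Settings.EMPTY.toString(), "init-settings").start();
            new Thread(() -> Builder.EMPTY_SETTINGS.toString(), "init-builder").start();
        }
    }

The change below breaks the cycle by constructing Settings.EMPTY directly instead of going through Builder, so the static dependency runs in one direction only: Builder depends on Settings, never the reverse.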
Signed-off-by: Michael Froh --- CHANGELOG.md | 1 + .../main/java/org/opensearch/common/settings/Settings.java | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7519e242c0acf..46f34abf72641 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -129,6 +129,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) - Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) - Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) +- Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) ### Security diff --git a/server/src/main/java/org/opensearch/common/settings/Settings.java b/server/src/main/java/org/opensearch/common/settings/Settings.java index 91e39e38f0379..d202feb0786bf 100644 --- a/server/src/main/java/org/opensearch/common/settings/Settings.java +++ b/server/src/main/java/org/opensearch/common/settings/Settings.java @@ -101,7 +101,7 @@ @PublicApi(since = "1.0.0") public final class Settings implements ToXContentFragment { - public static final Settings EMPTY = new Builder().build(); + public static final Settings EMPTY = new Settings(Collections.emptyMap(), null); /** The raw settings from the full key to raw string value. */ private final Map settings; @@ -757,7 +757,7 @@ public Set keySet() { @PublicApi(since = "1.0.0") public static class Builder { - public static final Settings EMPTY_SETTINGS = new Builder().build(); + public static final Settings EMPTY_SETTINGS = Settings.EMPTY; // we use a sorted map for consistent serialization when using getAsMap() private final Map map = new TreeMap<>(); From 9d0db5ef413ebc9f2eb7d8721ef74be273908094 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Romain=20Tarti=C3=A8re?= Date: Mon, 2 Oct 2023 10:58:24 -1000 Subject: [PATCH 08/26] Remove spurious SGID (#9447) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Setting the SGID bit on directories may be something some users want, but setting it by default for everyone does not make sense, and when packaging OpenSearch we need to remove this customization while building packages. This was added to ElasticSearch to make it possible to manage the keystore as root while the service runs as an unprivileged user. Without the SGID trick, the generated keystore was owned by root and ElasticSearch could not access it. It is preferable to manage the keystore with non-root privileges, and this hack is not required in this case. Stick to the default permissions and remove this customization.
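The only functional difference between the old and new directory modes in this change is the setgid bit. As a reference for the octal values in the diffs below, here is a small, hypothetical snippet (not part of the patch) that decomposes them:

    // 02750 versus 0750: the leading 2 is the setgid bit (02000). Files created
    // in a setgid directory inherit the directory's group instead of the
    // creator's primary group; 0750 restores the default behavior.
    public final class ModeBits {
        private static final int S_ISGID = 02000; // setgid bit, in octal

        static boolean hasSetgid(int mode) {
            return (mode & S_ISGID) != 0;
        }

        public static void main(String[] args) {
            int before = 02750; // rwxr-x--- plus setgid, as packaged previously
            int after = 0750;   // rwxr-x--- only, as packaged after this change
            System.out.printf("before=%o setgid=%b%n", before, hasSetgid(before)); // before=2750 setgid=true
            System.out.printf("after=%o setgid=%b%n", after, hasSetgid(after));    // after=750 setgid=false
        }
    }

This is also why the packaging test further below now expects "750" rather than "2750" from find -printf "%m".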
Signed-off-by: Romain Tartière --- CHANGELOG.md | 1 + distribution/packages/build.gradle | 8 ++++---- distribution/packages/src/deb/lintian/opensearch | 8 ++++---- .../test/java/org/opensearch/packaging/util/Packages.java | 4 ++-- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 46f34abf72641..c8e4cc57593a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -123,6 +123,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Deprecated ### Removed +- Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447)) ### Fixed - Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725)) diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 7914fcc172ef4..262ad6c802bbb 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -213,7 +213,7 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { configurationFile '/etc/opensearch/jvm.options' configurationFile '/etc/opensearch/log4j2.properties' from("${packagingFiles}") { - dirMode 02750 + dirMode 0750 into('/etc') permissionGroup 'opensearch' includeEmptyDirs true @@ -223,7 +223,7 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { } from("${packagingFiles}/etc/opensearch") { into('/etc/opensearch') - dirMode 02750 + dirMode 0750 fileMode 0660 permissionGroup 'opensearch' includeEmptyDirs true @@ -281,8 +281,8 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { dirMode mode } } - copyEmptyDir('/var/log/opensearch', 'opensearch', 'opensearch', 02750) - copyEmptyDir('/var/lib/opensearch', 'opensearch', 'opensearch', 02750) + copyEmptyDir('/var/log/opensearch', 'opensearch', 'opensearch', 0750) + copyEmptyDir('/var/lib/opensearch', 'opensearch', 'opensearch', 0750) copyEmptyDir('/usr/share/opensearch/plugins', 'root', 'root', 0755) into '/usr/share/opensearch' diff --git a/distribution/packages/src/deb/lintian/opensearch b/distribution/packages/src/deb/lintian/opensearch index 854b23131ecbc..e6db8e8c6b322 100644 --- a/distribution/packages/src/deb/lintian/opensearch +++ b/distribution/packages/src/deb/lintian/opensearch @@ -15,11 +15,11 @@ missing-dep-on-jarwrapper # we prefer to not make our config and log files world readable non-standard-file-perm etc/default/opensearch 0660 != 0644 -non-standard-dir-perm etc/opensearch/ 2750 != 0755 -non-standard-dir-perm etc/opensearch/jvm.options.d/ 2750 != 0755 +non-standard-dir-perm etc/opensearch/ 0750 != 0755 +non-standard-dir-perm etc/opensearch/jvm.options.d/ 0750 != 0755 non-standard-file-perm etc/opensearch/* -non-standard-dir-perm var/lib/opensearch/ 2750 != 0755 -non-standard-dir-perm var/log/opensearch/ 2750 != 0755 +non-standard-dir-perm var/lib/opensearch/ 0750 != 0755 +non-standard-dir-perm var/log/opensearch/ 0750 != 0755 executable-is-not-world-readable etc/init.d/opensearch 0750 non-standard-file-permissions-for-etc-init.d-script etc/init.d/opensearch 0750 != 0755 diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java index b80ae422bda9a..e9ebf28042b46 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java @@ -194,11 +194,11 @@ private 
static void verifyInstallation(Installation opensearch, Distribution dis // we shell out here because java's posix file permission view doesn't support special modes assertThat(opensearch.config, file(Directory, "root", "opensearch", p750)); - assertThat(sh.run("find \"" + opensearch.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750")); + assertThat(sh.run("find \"" + opensearch.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("750")); final Path jvmOptionsDirectory = opensearch.config.resolve("jvm.options.d"); assertThat(jvmOptionsDirectory, file(Directory, "root", "opensearch", p750)); - assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750")); + assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("750")); Stream.of("opensearch.keystore", "opensearch.yml", "jvm.options", "log4j2.properties") .forEach(configFile -> assertThat(opensearch.config(configFile), file(File, "root", "opensearch", p660))); From 1d66af346f7863bbca44a3102a1af2fed5fd3804 Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Mon, 2 Oct 2023 17:51:47 -0400 Subject: [PATCH 09/26] Fix: register multiple extensions. (#10256) * Fix: register multiple extensions. Signed-off-by: dblock * Updated CHANGELOG. Signed-off-by: dblock * Added tests. Signed-off-by: dblock --------- Signed-off-by: dblock --- CHANGELOG.md | 1 + .../extensions/ExtensionsManager.java | 21 ++- .../rest/RestActionsRequestHandler.java | 3 + .../rest/RestInitializeExtensionAction.java | 3 +- .../rest/RestSendToExtensionAction.java | 2 +- .../extensions/ExtensionsManagerTests.java | 145 +++++++++++++++++- .../RestInitializeExtensionActionTests.java | 19 +-- .../rest/RestSendToExtensionActionTests.java | 6 +- 8 files changed, 176 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c8e4cc57593a4..fb37533d4c834 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -131,6 +131,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) - Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) - Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) +- Fix registration and initialization of multiple extensions ([10256](https://github.com/opensearch-project/OpenSearch/pull/10256)) ### Security diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java index 9f9ba548143c6..b531abcb845d7 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java @@ -300,7 +300,7 @@ private void registerRequestHandler(DynamicActionRegistry dynamicActionRegistry) * Loads a single extension * @param extension The extension to be loaded */ - public void loadExtension(Extension extension) throws IOException { + public DiscoveryExtensionNode loadExtension(Extension extension) throws IOException { validateExtension(extension); DiscoveryExtensionNode discoveryExtensionNode = new DiscoveryExtensionNode( extension.getName(), @@ -314,6 +314,12 @@ public void loadExtension(Extension
extension) throws IOException { extensionIdMap.put(extension.getUniqueId(), discoveryExtensionNode); extensionSettingsMap.put(extension.getUniqueId(), extension); logger.info("Loaded extension with uniqueId " + extension.getUniqueId() + ": " + extension); + return discoveryExtensionNode; + } + + public void initializeExtension(Extension extension) throws IOException { + DiscoveryExtensionNode node = loadExtension(extension); + initializeExtensionNode(node); } private void validateField(String fieldName, String value) throws IOException { @@ -340,11 +346,11 @@ private void validateExtension(Extension extension) throws IOException { */ public void initialize() { for (DiscoveryExtensionNode extension : extensionIdMap.values()) { - initializeExtension(extension); + initializeExtensionNode(extension); } } - private void initializeExtension(DiscoveryExtensionNode extension) { + public void initializeExtensionNode(DiscoveryExtensionNode extensionNode) { final CompletableFuture inProgressFuture = new CompletableFuture<>(); final TransportResponseHandler initializeExtensionResponseHandler = new TransportResponseHandler< @@ -384,7 +390,8 @@ public String executor() { transportService.getThreadPool().generic().execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { - extensionIdMap.remove(extension.getId()); + logger.warn("Error registering extension: " + extensionNode.getId(), e); + extensionIdMap.remove(extensionNode.getId()); if (e.getCause() instanceof ConnectTransportException) { logger.info("No response from extension to request.", e); throw (ConnectTransportException) e.getCause(); @@ -399,11 +406,11 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { - transportService.connectToExtensionNode(extension); + transportService.connectToExtensionNode(extensionNode); transportService.sendRequest( - extension, + extensionNode, REQUEST_EXTENSION_ACTION_NAME, - new InitializeExtensionRequest(transportService.getLocalNode(), extension, issueServiceAccount(extension)), + new InitializeExtensionRequest(transportService.getLocalNode(), extensionNode, issueServiceAccount(extensionNode)), initializeExtensionResponseHandler ); } diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java index 97851cbd394a0..383796f0c3b44 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java @@ -62,6 +62,9 @@ public TransportResponse handleRegisterRestActionsRequest( DynamicActionRegistry dynamicActionRegistry ) throws Exception { DiscoveryExtensionNode discoveryExtensionNode = extensionIdMap.get(restActionsRequest.getUniqueId()); + if (discoveryExtensionNode == null) { + throw new IllegalStateException("Missing extension node for " + restActionsRequest.getUniqueId()); + } RestHandler handler = new RestSendToExtensionAction( restActionsRequest, discoveryExtensionNode, diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java index 4b622b841a040..fc7c21a6eccd6 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java @@ -159,8 +159,7 @@ public 
RestChannelConsumer prepareRequest(RestRequest request, NodeClient client extAdditionalSettings ); try { - extensionsManager.loadExtension(extension); - extensionsManager.initialize(); + extensionsManager.initializeExtension(extension); } catch (CompletionException e) { Throwable cause = e.getCause(); if (cause instanceof TimeoutException) { diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java index 33f44a913dd8a..41783b89ccc69 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java @@ -150,7 +150,7 @@ public RestSendToExtensionAction( @Override public String getName() { - return SEND_TO_EXTENSION_ACTION; + return this.discoveryExtensionNode.getId() + ":" + SEND_TO_EXTENSION_ACTION; } @Override diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java index f243a924f4e63..c61afdd5c5261 100644 --- a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java +++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java @@ -36,6 +36,7 @@ import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.discovery.InitializeExtensionRequest; import org.opensearch.env.Environment; import org.opensearch.env.EnvironmentSettingsResponse; import org.opensearch.extensions.ExtensionsSettings.Extension; @@ -77,6 +78,7 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -409,19 +411,94 @@ public void testInitialize() throws Exception { ) ); - // Test needs to be changed to mock the connection between the local node and an extension. Assert statment is commented out for - // now. + // Test needs to be changed to mock the connection between the local node and an extension. 
// Link to issue: https://github.com/opensearch-project/OpenSearch/issues/4045 // mockLogAppender.assertAllExpectationsMatched(); } } + public void testInitializeExtension() throws Exception { + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); + + TransportService mockTransportService = spy( + new TransportService( + Settings.EMPTY, + mock(Transport.class), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, + null, + Collections.emptySet(), + NoopTracer.INSTANCE + ) + ); + + doNothing().when(mockTransportService).connectToExtensionNode(any(DiscoveryExtensionNode.class)); + + doNothing().when(mockTransportService) + .sendRequest(any(DiscoveryExtensionNode.class), anyString(), any(InitializeExtensionRequest.class), any()); + + extensionsManager.initializeServicesAndRestHandler( + actionModule, + settingsModule, + mockTransportService, + clusterService, + settings, + client, + identityService + ); + + Extension firstExtension = new Extension( + "firstExtension", + "uniqueid1", + "127.0.0.0", + "9301", + "0.0.7", + "2.0.0", + "2.0.0", + List.of(), + null + ); + + extensionsManager.initializeExtension(firstExtension); + + Extension secondExtension = new Extension( + "secondExtension", + "uniqueid2", + "127.0.0.0", + "9301", + "0.0.7", + "2.0.0", + "2.0.0", + List.of(), + null + ); + + extensionsManager.initializeExtension(secondExtension); + + ThreadPool.terminate(threadPool, 3, TimeUnit.SECONDS); + + verify(mockTransportService, times(2)).connectToExtensionNode(any(DiscoveryExtensionNode.class)); + + verify(mockTransportService, times(2)).sendRequest( + any(DiscoveryExtensionNode.class), + anyString(), + any(InitializeExtensionRequest.class), + any() + ); + } + public void testHandleRegisterRestActionsRequest() throws Exception { ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); + List actionsList = List.of("GET /foo foo", "PUT /bar bar", "POST /baz baz"); List deprecatedActionsList = List.of("GET /deprecated/foo foo_deprecated", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -431,6 +508,58 @@ public void testHandleRegisterRestActionsRequest() throws Exception { assertTrue(((AcknowledgedResponse) response).getStatus()); } + public void testHandleRegisterRestActionsRequestRequiresDiscoveryNode() throws Exception { + + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); + initialize(extensionsManager); + + RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest("uniqueId1", List.of(), List.of()); + + expectThrows( + IllegalStateException.class, + () -> extensionsManager.getRestActionsRequestHandler() + .handleRegisterRestActionsRequest(registerActionsRequest, actionModule.getDynamicActionRegistry()) + ); + } + + public void testHandleRegisterRestActionsRequestMultiple() throws Exception { + + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); + initialize(extensionsManager); + + List actionsList = List.of("GET /foo foo", "PUT /bar bar", "POST /baz baz"); + List deprecatedActionsList = List.of("GET /deprecated/foo foo_deprecated", "It's deprecated!"); + for (int i = 0; i < 2; 
i++) { + String uniqueIdStr = "uniqueid-%d" + i; + + Set> additionalSettings = extAwarePlugin.getExtensionSettings().stream().collect(Collectors.toSet()); + ExtensionScopedSettings extensionScopedSettings = new ExtensionScopedSettings(additionalSettings); + Extension firstExtension = new Extension( + "Extension %s" + i, + uniqueIdStr, + "127.0.0.0", + "9300", + "0.0.7", + "3.0.0", + "3.0.0", + List.of(), + extensionScopedSettings + ); + + extensionsManager.loadExtension(firstExtension); + + RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest( + uniqueIdStr, + actionsList, + deprecatedActionsList + ); + TransportResponse response = extensionsManager.getRestActionsRequestHandler() + .handleRegisterRestActionsRequest(registerActionsRequest, actionModule.getDynamicActionRegistry()); + assertEquals(AcknowledgedResponse.class, response.getClass()); + assertTrue(((AcknowledgedResponse) response).getStatus()); + } + } + public void testHandleRegisterSettingsRequest() throws Exception { ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); @@ -452,6 +581,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidMethod() throws Excep initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("FOO /foo", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("GET /deprecated/foo", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -467,6 +599,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedMethod() th initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("GET /foo", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("FOO /deprecated/foo", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -481,6 +616,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidUri() throws Exceptio ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("GET", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("GET /deprecated/foo", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -495,6 +633,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedUri() throw ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("GET /foo", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("GET", "It's deprecated!"); 
RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java index cdddf8e9be1be..e237214ab88f5 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java @@ -19,8 +19,9 @@ import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.extensions.DiscoveryExtensionNode; import org.opensearch.extensions.ExtensionsManager; -import org.opensearch.extensions.ExtensionsSettings; +import org.opensearch.extensions.ExtensionsSettings.Extension; import org.opensearch.identity.IdentityService; import org.opensearch.rest.RestRequest; import org.opensearch.telemetry.tracing.noop.NoopTracer; @@ -160,8 +161,8 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettings() th // optionally, you can stub out some methods: when(spy.getAdditionalSettings()).thenCallRealMethod(); - Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); - Mockito.doNothing().when(spy).initialize(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(Extension.class)); + Mockito.doNothing().when(spy).initializeExtensionNode(any(DiscoveryExtensionNode.class)); RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" @@ -177,10 +178,10 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettings() th FakeRestChannel channel = new FakeRestChannel(request, false, 0); restInitializeExtensionAction.handleRequest(request, channel, null); - assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertEquals(RestStatus.ACCEPTED, channel.capturedResponse().status()); assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); - Optional extension = spy.lookupExtensionSettingsById("ad-extension"); + Optional extension = spy.lookupExtensionSettingsById("ad-extension"); assertTrue(extension.isPresent()); assertEquals(true, extension.get().getAdditionalSettings().get(boolSetting)); assertEquals("customSetting", extension.get().getAdditionalSettings().get(stringSetting)); @@ -210,8 +211,8 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettingsUsing // optionally, you can stub out some methods: when(spy.getAdditionalSettings()).thenCallRealMethod(); - Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); - Mockito.doNothing().when(spy).initialize(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(Extension.class)); + Mockito.doNothing().when(spy).initializeExtensionNode(any(DiscoveryExtensionNode.class)); RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + 
"\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" @@ -227,10 +228,10 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettingsUsing FakeRestChannel channel = new FakeRestChannel(request, false, 0); restInitializeExtensionAction.handleRequest(request, channel, null); - assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertEquals(RestStatus.ACCEPTED, channel.capturedResponse().status()); assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); - Optional extension = spy.lookupExtensionSettingsById("ad-extension"); + Optional extension = spy.lookupExtensionSettingsById("ad-extension"); assertTrue(extension.isPresent()); assertEquals(false, extension.get().getAdditionalSettings().get(boolSetting)); assertEquals("default", extension.get().getAdditionalSettings().get(stringSetting)); diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java index ee36ea170e270..fe738ff7d85e6 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java @@ -150,7 +150,7 @@ public void testRestSendToExtensionAction() throws Exception { identityService ); - assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName()); List expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; expected.add(new Route(Method.GET, uriPrefix + "/foo")); @@ -183,7 +183,7 @@ public void testRestSendToExtensionActionWithNamedRoute() throws Exception { identityService ); - assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName()); List expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; NamedRoute nr1 = new NamedRoute.Builder().method(Method.GET).path(uriPrefix + "/foo").uniqueName("foo").build(); @@ -229,7 +229,7 @@ public void testRestSendToExtensionActionWithNamedRouteAndLegacyActionName() thr identityService ); - assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName()); List expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; NamedRoute nr1 = new NamedRoute.Builder().method(Method.GET) From beb25b1957321234aa3642977b0f7144318c8d8a Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 2 Oct 2023 18:21:58 -0400 Subject: [PATCH 10/26] Bump netty from 4.1.97.Final to 4.1.99.Final (#10306) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- .../licenses/netty-buffer-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.99.Final.jar.sha1 | 1 + .../transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 | 1 - .../transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.99.Final.jar.sha1 | 1 + 
 .../licenses/netty-handler-4.1.97.Final.jar.sha1              | 1 -
 .../licenses/netty-handler-4.1.99.Final.jar.sha1              | 1 +
 .../licenses/netty-resolver-4.1.97.Final.jar.sha1             | 1 -
 .../licenses/netty-resolver-4.1.99.Final.jar.sha1             | 1 +
 .../licenses/netty-transport-4.1.97.Final.jar.sha1            | 1 -
 .../licenses/netty-transport-4.1.99.Final.jar.sha1            | 1 +
 .../netty-transport-native-unix-common-4.1.97.Final.jar.sha1  | 1 -
 .../netty-transport-native-unix-common-4.1.99.Final.jar.sha1  | 1 +
 .../licenses/netty-codec-dns-4.1.97.Final.jar.sha1            | 1 -
 .../licenses/netty-codec-dns-4.1.99.Final.jar.sha1            | 1 +
 .../licenses/netty-codec-http2-4.1.97.Final.jar.sha1          | 1 -
 .../licenses/netty-codec-http2-4.1.99.Final.jar.sha1          | 1 +
 .../licenses/netty-codec-socks-4.1.97.Final.jar.sha1          | 1 -
 .../licenses/netty-codec-socks-4.1.99.Final.jar.sha1          | 1 +
 .../licenses/netty-handler-proxy-4.1.97.Final.jar.sha1        | 1 -
 .../licenses/netty-handler-proxy-4.1.99.Final.jar.sha1        | 1 +
 .../licenses/netty-resolver-dns-4.1.97.Final.jar.sha1         | 1 -
 .../licenses/netty-resolver-dns-4.1.99.Final.jar.sha1         | 1 +
 .../netty-transport-native-unix-common-4.1.97.Final.jar.sha1  | 1 -
 .../netty-transport-native-unix-common-4.1.99.Final.jar.sha1  | 1 +
 .../repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1  | 1 -
 .../repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1  | 1 +
 .../repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 | 1 -
 .../repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 | 1 +
 .../repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1  | 1 -
 .../repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1  | 1 +
 .../licenses/netty-codec-http-4.1.97.Final.jar.sha1           | 1 -
 .../licenses/netty-codec-http-4.1.99.Final.jar.sha1           | 1 +
 .../licenses/netty-codec-http2-4.1.97.Final.jar.sha1          | 1 -
 .../licenses/netty-codec-http2-4.1.99.Final.jar.sha1          | 1 +
 .../repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 | 1 -
 .../repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 | 1 +
 .../repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 | 1 -
 .../repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 | 1 +
 .../repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 | 1 -
 .../repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 | 1 +
 .../licenses/netty-transport-4.1.97.Final.jar.sha1            | 1 -
 .../licenses/netty-transport-4.1.99.Final.jar.sha1            | 1 +
 .../netty-transport-classes-epoll-4.1.97.Final.jar.sha1       | 1 -
 .../netty-transport-classes-epoll-4.1.99.Final.jar.sha1       | 1 +
 .../netty-transport-native-unix-common-4.1.97.Final.jar.sha1  | 1 -
 .../netty-transport-native-unix-common-4.1.99.Final.jar.sha1  | 1 +
 .../transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 | 1 -
 .../transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 | 1 +
 .../transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1  | 1 -
 .../transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1  | 1 +
 .../licenses/netty-codec-http-4.1.97.Final.jar.sha1           | 1 -
 .../licenses/netty-codec-http-4.1.99.Final.jar.sha1           | 1 +
 .../transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 | 1 -
 .../transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 | 1 +
 .../transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 | 1 -
 .../transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 | 1 +
 .../transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1 | 1 -
 .../transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 | 1 +
 .../licenses/netty-transport-4.1.97.Final.jar.sha1            | 1 -
 .../licenses/netty-transport-4.1.99.Final.jar.sha1            | 1 +
 68 files changed, 35 insertions(+), 34 deletions(-)
 delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1
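Each *.jar.sha1 file above stores the hex-encoded SHA-1 digest of one dependency jar; the build's dependency-license check compares the resolved artifact against that stored digest, which is why a Netty bump is almost entirely delete/create pairs of one-line files. The sketch below shows how such a digest can be reproduced. It assumes a plain SHA-1 over the jar bytes; the class and method names are illustrative only, not OpenSearch build code.

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.security.MessageDigest;

    // Illustrative sketch: reproduce the hex digest stored in a *.jar.sha1 file.
    public final class Sha1OfJar {
        public static String sha1Hex(Path jar) throws Exception {
            MessageDigest digest = MessageDigest.getInstance("SHA-1");
            try (InputStream in = Files.newInputStream(jar)) {
                byte[] buffer = new byte[8192];
                int read;
                while ((read = in.read(buffer)) != -1) {
                    digest.update(buffer, 0, read); // stream the jar through the digest
                }
            }
            StringBuilder hex = new StringBuilder();
            for (byte b : digest.digest()) {
                hex.append(String.format("%02x", b)); // lower-case hex, one byte at a time
            }
            return hex.toString();
        }

        public static void main(String[] args) throws Exception {
            System.out.println(sha1Hex(Path.of(args[0])));
        }
    }

Run against netty-buffer-4.1.99.Final.jar, this should print the 9f02dcb9... value recorded in the corresponding hunk below.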
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fb37533d4c834..9b4e6408f3491 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -110,6 +110,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))`
 - Bump Lucene from 9.7.0 to 9.8.0 ([#10276](https://github.com/opensearch-project/OpenSearch/pull/10276))
 - Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302))
+- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303))
 
 ### Changed
 - Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415))
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 243a1b2c6f57e..e54a5a1089a93 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -28,7 +28,7 @@ jakarta_annotation = 1.3.5
 
 # when updating the JNA version, also update the version in buildSrc/build.gradle
 jna = 5.13.0
-netty = 4.1.97.Final
+netty = 4.1.99.Final
 joda = 2.12.2
 
 # client dependencies
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1
deleted file mode 100644
index 8430355365996..0000000000000
--- a/modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1
new file mode 100644
index 0000000000000..5b393be40e945
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1
@@ -0,0 +1 @@
+9f02dcb9b15a647a56af210dffdc294a57922fb0
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1
deleted file mode 100644
index 7a36dc1f2724f..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-384ba4d75670befbedb45c4d3b497a93639c206d
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1
new file mode 100644
index 0000000000000..45ea27d29a183
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1
@@ -0,0 +1 @@
+9984cbd6e5d55c768f198e975d8aaf7fd42a4602
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1
deleted file mode 100644
index 37b78a32f741f..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-af78acec783ffd77c63d8aeecc21041fd39ac54f
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1
new file mode 100644
index 0000000000000..6bb7fcd68b272
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1
@@ -0,0 +1 @@
+7142095066eaebd5f29b88c41af7b383b6a953f6
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1
deleted file mode 100644
index cbf685a6d79d3..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-893888d09a7bef0d0ba973d7471943e765d0fd08
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1
new file mode 100644
index 0000000000000..f9bdefc6dd965
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1
@@ -0,0 +1 @@
+c5a3481c4bb9732a3a94fb63cf916141a1a14669 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index 1bdfec3aae6ba..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7cceacaf11df8dc63f23d0fb58e9d4640fc88404 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..d53adfa649f5f --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +278f6dfa49d6bd75c40ae1470eb165716f87dce0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8b7b50a6fc9c6..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..258f7c957dda0 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +742693761d7ea4c038bccfda96bb38194720b80d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1 deleted file mode 100644 index 032959e98d009..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cec8348108dc76c47cf87c669d514be52c922144 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..b8bc0a4370f58 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +080e45397d9d5b134477de3ffd0f94283b908621 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1 deleted file mode 100644 index 107863c1b3c9d..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f37380d23c9bb079bc702910833b2fd532c9abd0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..247975e0a64c7 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9ca2e3ae19a6713b749df154622115f480b6716c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index f736d37d071b7..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 
+0,0 @@ -d469d84265ab70095b01b40886cabdd433b6e664 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6b7b66ea768e3 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +cb0fc6c31c387404212949c57950b5d72ce908b9 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1 deleted file mode 100644 index f592ac8312a5d..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d266d079ef33cf93a16b382d64dd15d562df1159 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6c1112ed49775 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +21c76a42a468faafac6c84f8aca775073fc8e345 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 deleted file mode 100644 index cbf685a6d79d3..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -893888d09a7bef0d0ba973d7471943e765d0fd08 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..f9bdefc6dd965 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +c5a3481c4bb9732a3a94fb63cf916141a1a14669 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1 deleted file mode 100644 index d06147a0ba646..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30e8fa29a349db5a933225d61891b8802836bb79 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..717703c36e1ab --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +259bf1c5178c3e23bb89a2fab59b6d22846e3fa6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 deleted file mode 100644 index 67c3a763d26fa..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a99ecef0e1d86a92e40a7c89805c236d9cd7493e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..3f69ae54c5d4a --- /dev/null 
+++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +8c8a89ea89b06e120c57bdb3db14b9a47ca30bb3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 deleted file mode 100644 index 60fd706436ae7..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2c50f835777ecd4535e15b552b5d9ccb26a2504f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..adef44a4e7da7 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +804d8b752847923d3bb81f24de604597047c9b2e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index f736d37d071b7..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d469d84265ab70095b01b40886cabdd433b6e664 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6b7b66ea768e3 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +cb0fc6c31c387404212949c57950b5d72ce908b9 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 deleted file mode 100644 index c6fa4cc175222..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -afec3c414a0ab7264a66a7572e9e9d3a19a3e0e5 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..0756635018837 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +a45aa70bc50d0500da5cdcd595cc838d87ada987 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8430355365996..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..5b393be40e945 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9f02dcb9b15a647a56af210dffdc294a57922fb0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 deleted file mode 100644 index 
7a36dc1f2724f..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -384ba4d75670befbedb45c4d3b497a93639c206d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..45ea27d29a183 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9984cbd6e5d55c768f198e975d8aaf7fd42a4602 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1 deleted file mode 100644 index 37b78a32f741f..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af78acec783ffd77c63d8aeecc21041fd39ac54f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6bb7fcd68b272 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +7142095066eaebd5f29b88c41af7b383b6a953f6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 deleted file mode 100644 index cbf685a6d79d3..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -893888d09a7bef0d0ba973d7471943e765d0fd08 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..f9bdefc6dd965 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +c5a3481c4bb9732a3a94fb63cf916141a1a14669 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index 1bdfec3aae6ba..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7cceacaf11df8dc63f23d0fb58e9d4640fc88404 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..d53adfa649f5f --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +278f6dfa49d6bd75c40ae1470eb165716f87dce0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8b7b50a6fc9c6..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..258f7c957dda0 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 @@ -0,0 +1 
@@ +742693761d7ea4c038bccfda96bb38194720b80d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 deleted file mode 100644 index 032959e98d009..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cec8348108dc76c47cf87c669d514be52c922144 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..b8bc0a4370f58 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +080e45397d9d5b134477de3ffd0f94283b908621 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1 deleted file mode 100644 index 107863c1b3c9d..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f37380d23c9bb079bc702910833b2fd532c9abd0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..247975e0a64c7 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9ca2e3ae19a6713b749df154622115f480b6716c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8e40c8826d76d..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -795da37ded759e862457a82d9d92c4d39ce8ecee \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..75b64ad4197d8 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +961bd5b8d97ea6a07168176462f398089a24b5c8 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index f736d37d071b7..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d469d84265ab70095b01b40886cabdd433b6e664 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6b7b66ea768e3 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +cb0fc6c31c387404212949c57950b5d72ce908b9 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8430355365996..0000000000000 --- 
a/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..5b393be40e945 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9f02dcb9b15a647a56af210dffdc294a57922fb0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 deleted file mode 100644 index 7a36dc1f2724f..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -384ba4d75670befbedb45c4d3b497a93639c206d \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..45ea27d29a183 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9984cbd6e5d55c768f198e975d8aaf7fd42a4602 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 deleted file mode 100644 index 37b78a32f741f..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af78acec783ffd77c63d8aeecc21041fd39ac54f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6bb7fcd68b272 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +7142095066eaebd5f29b88c41af7b383b6a953f6 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index 1bdfec3aae6ba..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7cceacaf11df8dc63f23d0fb58e9d4640fc88404 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..d53adfa649f5f --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +278f6dfa49d6bd75c40ae1470eb165716f87dce0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8b7b50a6fc9c6..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..258f7c957dda0 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +742693761d7ea4c038bccfda96bb38194720b80d \ No newline at end 
of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1
deleted file mode 100644
index 032959e98d009..0000000000000
--- a/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cec8348108dc76c47cf87c669d514be52c922144
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1
new file mode 100644
index 0000000000000..b8bc0a4370f58
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1
@@ -0,0 +1 @@
+080e45397d9d5b134477de3ffd0f94283b908621
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1
deleted file mode 100644
index 107863c1b3c9d..0000000000000
--- a/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f37380d23c9bb079bc702910833b2fd532c9abd0
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1
new file mode 100644
index 0000000000000..247975e0a64c7
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1
@@ -0,0 +1 @@
+9ca2e3ae19a6713b749df154622115f480b6716c
\ No newline at end of file

From 3a790c150e6a8b45d385341e276da6f226489515 Mon Sep 17 00:00:00 2001
From: Jay Deng
Date: Mon, 2 Oct 2023 22:37:17 -0700
Subject: [PATCH 11/26] Disable concurrent search when terminate_after is used
 (#10200)

Signed-off-by: Jay Deng
---
 CHANGELOG.md                                        |   1 +
 .../simple/ParameterizedSimpleSearchIT.java         | 608 -----------------
 .../search/simple/SimpleSearchIT.java               | 645 +++++++++++++++++-
 .../search/DefaultSearchContext.java                |   2 +
 4 files changed, 645 insertions(+), 611 deletions(-)
 delete mode 100644 server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java
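The functional part of this patch is the two added lines in DefaultSearchContext; that hunk is not visible in this excerpt. Per the subject line and the CHANGELOG entry below, the idea is that terminate_after stops collection once the requested number of documents has matched, and that per-collector early-exit bookkeeping does not compose with slicing segments across threads, so the context refuses concurrent segment search whenever the parameter is set. A rough sketch of that gating follows, with hypothetical names rather than the literal diff:

    // Illustrative sketch only -- not the actual two-line change.
    final class ConcurrentSearchGate {
        // Mirrors the SearchContext convention where DEFAULT_TERMINATE_AFTER (0)
        // means "no early termination".
        static final int DEFAULT_TERMINATE_AFTER = 0;

        private final boolean concurrentSegmentSearchEnabled; // cluster/index setting
        private final int terminateAfter;                     // from the search request

        ConcurrentSearchGate(boolean concurrentSegmentSearchEnabled, int terminateAfter) {
            this.concurrentSegmentSearchEnabled = concurrentSegmentSearchEnabled;
            this.terminateAfter = terminateAfter;
        }

        // Concurrent segment search stays available only when terminate_after is unset.
        boolean shouldUseConcurrentSearch() {
            return concurrentSegmentSearchEnabled && terminateAfter == DEFAULT_TERMINATE_AFTER;
        }
    }

The test consolidation below exercises both sides of this gate by running SimpleSearchIT with the concurrent-search setting on and off.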
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9b4e6408f3491..9963eaef31d33 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -120,6 +120,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042))
 - [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122))
 - Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246))
+- Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200))
 
 ### Deprecated
 
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java
deleted file mode 100644
index 719b75079da92..0000000000000
--- a/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java
+++ /dev/null
@@ -1,608 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.search.simple;
-
-import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
-
-import org.opensearch.action.index.IndexRequestBuilder;
-import org.opensearch.action.search.SearchPhaseExecutionException;
-import org.opensearch.action.search.SearchRequestBuilder;
-import org.opensearch.action.search.SearchResponse;
-import org.opensearch.action.support.WriteRequest.RefreshPolicy;
-import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
-import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.json.JsonXContent;
-import org.opensearch.core.rest.RestStatus;
-import org.opensearch.core.xcontent.MediaTypeRegistry;
-import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.index.IndexSettings;
-import org.opensearch.index.mapper.MapperService;
-import org.opensearch.index.query.QueryBuilders;
-import org.opensearch.index.query.TermQueryBuilder;
-import org.opensearch.search.rescore.QueryRescorerBuilder;
-import org.opensearch.search.sort.SortOrder;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-
-import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
-import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
-import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
-import static org.opensearch.index.query.QueryBuilders.boolQuery;
-import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
-import static org.opensearch.index.query.QueryBuilders.queryStringQuery;
-import static org.opensearch.index.query.QueryBuilders.rangeQuery;
-import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures;
-import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.equalTo;
-
-public class ParameterizedSimpleSearchIT extends ParameterizedOpenSearchIntegTestCase {
-
-    public ParameterizedSimpleSearchIT(Settings settings) {
-        super(settings);
-    }
-
-    @ParametersFactory
-    public static Collection<Object[]> parameters() {
-        return Arrays.asList(
-            new
Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } - ); - } - - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - - public void testSearchNullIndex() { - expectThrows( - NullPointerException.class, - () -> client().prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() - ); - - expectThrows( - NullPointerException.class, - () -> client().prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() - ); - - } - - public void testSearchRandomPreference() throws InterruptedException, ExecutionException { - createIndex("test"); - indexRandom( - true, - client().prepareIndex("test").setId("1").setSource("field", "value"), - client().prepareIndex("test").setId("2").setSource("field", "value"), - client().prepareIndex("test").setId("3").setSource("field", "value"), - client().prepareIndex("test").setId("4").setSource("field", "value"), - client().prepareIndex("test").setId("5").setSource("field", "value"), - client().prepareIndex("test").setId("6").setSource("field", "value") - ); - - int iters = scaledRandomIntBetween(10, 20); - for (int i = 0; i < iters; i++) { - String randomPreference = randomUnicodeOfLengthBetween(0, 4); - // randomPreference should not start with '_' (reserved for known preference types (e.g. _shards, _primary) - while (randomPreference.startsWith("_")) { - randomPreference = randomUnicodeOfLengthBetween(0, 4); - } - // id is not indexed, but lets see that we automatically convert to - SearchResponse searchResponse = client().prepareSearch() - .setQuery(QueryBuilders.matchAllQuery()) - .setPreference(randomPreference) - .get(); - assertHitCount(searchResponse, 6L); - - } - } - - public void testSimpleIp() throws Exception { - createIndex("test"); - - client().admin() - .indices() - .preparePutMapping("test") - .setSource( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("from") - .field("type", "ip") - .endObject() - .startObject("to") - .field("type", "ip") - .endObject() - .endObject() - .endObject() - .endObject() - ) - .get(); - - client().prepareIndex("test").setId("1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefreshPolicy(IMMEDIATE).get(); - - SearchResponse search = client().prepareSearch() - .setQuery(boolQuery().must(rangeQuery("from").lte("192.168.0.7")).must(rangeQuery("to").gte("192.168.0.7"))) - .get(); - - assertHitCount(search, 1L); - } - - public void testIpCidr() throws Exception { - createIndex("test"); - - client().admin() - .indices() - .preparePutMapping("test") - .setSource( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("ip") - .field("type", "ip") - .endObject() - .endObject() - .endObject() - .endObject() - ) - .get(); - ensureGreen(); - - client().prepareIndex("test").setId("1").setSource("ip", "192.168.0.1").get(); - client().prepareIndex("test").setId("2").setSource("ip", "192.168.0.2").get(); - client().prepareIndex("test").setId("3").setSource("ip", "192.168.0.3").get(); - client().prepareIndex("test").setId("4").setSource("ip", "192.168.1.4").get(); - 
client().prepareIndex("test").setId("5").setSource("ip", "2001:db8::ff00:42:8329").get(); - refresh(); - - SearchResponse search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(queryStringQuery("ip: 192.168.0.1")).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1/32"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.0/24"))).get(); - assertHitCount(search, 3L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.0.0.0/8"))).get(); - assertHitCount(search, 4L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0.0.0.0/0"))).get(); - assertHitCount(search, 4L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::ff00:42:8329/128"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::/64"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "::/0"))).get(); - assertHitCount(search, 5L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.1.5/32"))).get(); - assertHitCount(search, 0L); - - assertFailures( - client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0/0/0/0/0"))), - RestStatus.BAD_REQUEST, - containsString("Expected [ip/prefix] but was [0/0/0/0/0]") - ); - } - - public void testSimpleId() { - createIndex("test"); - - client().prepareIndex("test").setId("XXX1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - // id is not indexed, but lets see that we automatically convert to - SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")).get(); - assertHitCount(searchResponse, 1L); - - searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")).get(); - assertHitCount(searchResponse, 1L); - } - - public void testSimpleDateRange() throws Exception { - createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field", "2010-01-05T02:00").get(); - client().prepareIndex("test").setId("2").setSource("field", "2010-01-06T02:00").get(); - ensureGreen(); - refresh(); - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d/d")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lte("2010-01-06T02:00")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lt("2010-01-06T02:00")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gt("2010-01-05T02:00").lt("2010-01-06T02:00")) - .get(); - assertNoFailures(searchResponse); - 
assertHitCount(searchResponse, 0L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.queryStringQuery("field:[2010-01-03||+2d TO 2010-01-04||+2d/d]")) - .get(); - assertHitCount(searchResponse, 2L); - } - - // TODO: combine this test with SimpleSearchIT.testSimpleTerminateAfterCount after - // https://github.com/opensearch-project/OpenSearch/issues/8371 - public void testSimpleTerminateAfterCountWithSizeAndTrackHits() throws Exception { - prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); - ensureGreen(); - int numDocs = randomIntBetween(15, 29); - List docbuilders = new ArrayList<>(numDocs); - - for (int i = 1; i <= numDocs; i++) { - String id = String.valueOf(i); - docbuilders.add(client().prepareIndex("test").setId(id).setSource("field", i)); - } - - indexRandom(true, docbuilders); - ensureGreen(); - refresh(); - - SearchResponse searchResponse; - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.matchAllQuery()) - .setTerminateAfter(numDocs) - .setSize(0) - .setTrackTotalHits(true) - .get(); - assertEquals(0, searchResponse.getFailedShards()); - } - - public void testSimpleIndexSortEarlyTerminate() throws Exception { - prepareCreate("test").setSettings( - Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).put("index.sort.field", "rank") - ).setMapping("rank", "type=integer").get(); - ensureGreen(); - int max = randomIntBetween(3, 29); - List docbuilders = new ArrayList<>(max); - - for (int i = max - 1; i >= 0; i--) { - String id = String.valueOf(i); - docbuilders.add(client().prepareIndex("test").setId(id).setSource("rank", i)); - } - - indexRandom(true, docbuilders); - ensureGreen(); - refresh(); - - SearchResponse searchResponse; - for (int i = 1; i < max; i++) { - searchResponse = client().prepareSearch("test") - .addDocValueField("rank") - .setTrackTotalHits(false) - .addSort("rank", SortOrder.ASC) - .setSize(i) - .get(); - assertNull(searchResponse.getHits().getTotalHits()); - for (int j = 0; j < i; j++) { - assertThat(searchResponse.getHits().getAt(j).field("rank").getValue(), equalTo((long) j)); - } - } - } - - public void testInsaneFromAndSize() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertWindowFails(client().prepareSearch("idx").setFrom(Integer.MAX_VALUE)); - assertWindowFails(client().prepareSearch("idx").setSize(Integer.MAX_VALUE)); - } - - public void testTooLargeFromAndSize() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertWindowFails(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))); - assertWindowFails(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1)); - assertWindowFails( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - ); - } - - public void testLargeFromAndSizeSucceeds() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - 10).get(), 1); - 
assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2 - 1) - .get(), - 1 - ); - } - - public void testTooLargeFromAndSizeOkBySetting() throws Exception { - prepareCreate("idx").setSettings( - Settings.builder() - .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2) - ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .get(), - 1 - ); - } - - public void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception { - createIndex("idx"); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings("idx") - .setSettings( - Settings.builder() - .put( - IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), - IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2 - ) - ) - .get() - ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .get(), - 1 - ); - } - - public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws Exception { - prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) - .get(), - 1 - ); - } - - public void testTooLargeRescoreWindow() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertRescoreWindowFails(Integer.MAX_VALUE); - assertRescoreWindowFails(IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + 1); - } - - public void testTooLargeRescoreOkBySetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) - .get(); - 
indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testTooLargeRescoreOkByResultWindowSetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - prepareCreate("idx").setSettings( - Settings.builder() - .put( - IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), // Note that this is the RESULT window. - defaultMaxWindow * 2 - ) - ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testTooLargeRescoreOkByDynamicSetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - createIndex("idx"); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings("idx") - .setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) - .get() - ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testTooLargeRescoreOkByDynamicResultWindowSetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - createIndex("idx"); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings("idx") - .setSettings( - // Note that this is the RESULT window - Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), defaultMaxWindow * 2) - ) - .get() - ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testQueryNumericFieldWithRegex() throws Exception { - assertAcked(prepareCreate("idx").setMapping("num", "type=integer")); - ensureGreen("idx"); - - try { - client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", "34")).get(); - fail("SearchPhaseExecutionException should have been thrown"); - } catch (SearchPhaseExecutionException ex) { - assertThat(ex.getRootCause().getMessage(), containsString("Can only use regexp queries on keyword and text fields")); - } - } - - public void testTermQueryBigInt() throws Exception { - prepareCreate("idx").setMapping("field", "type=keyword").get(); - ensureGreen("idx"); - - client().prepareIndex("idx") - .setId("1") - .setSource("{\"field\" : 80315953321748200608 }", MediaTypeRegistry.JSON) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - .get(); - - String queryJson = "{ \"field\" : { \"value\" : 80315953321748200608 } }"; - XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson); - parser.nextToken(); - TermQueryBuilder query = TermQueryBuilder.fromXContent(parser); - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(query).get(); - assertEquals(1, searchResponse.getHits().getTotalHits().value); - } - - public void testTooLongRegexInRegexpQuery() throws Exception { - createIndex("idx"); - indexRandom(true, 
client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - int defaultMaxRegexLength = IndexSettings.MAX_REGEX_LENGTH_SETTING.get(Settings.EMPTY); - StringBuilder regexp = new StringBuilder(defaultMaxRegexLength); - while (regexp.length() <= defaultMaxRegexLength) { - regexp.append("]\\r\\\\]|\\\\.)*\\](?:(?:\\r\\n)?[\\t])*))*(?:,@(?:(?:\\r\\n)?[ \\t])*(?:[^()<>@,;:\\\\\".\\"); - } - SearchPhaseExecutionException e = expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", regexp.toString())).get() - ); - assertThat( - e.getRootCause().getMessage(), - containsString( - "The length of regex [" - + regexp.length() - + "] used in the Regexp Query request has exceeded " - + "the allowed maximum of [" - + defaultMaxRegexLength - + "]. " - + "This maximum can be set by changing the [" - + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey() - + "] index level setting." - ) - ); - } - - private void assertWindowFails(SearchRequestBuilder search) { - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); - assertThat( - e.toString(), - containsString( - "Result window is too large, from + size must be less than or equal to: [" - + IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - ) - ); - assertThat(e.toString(), containsString("See the scroll api for a more efficient way to request large data sets")); - } - - private void assertRescoreWindowFails(int windowSize) { - SearchRequestBuilder search = client().prepareSearch("idx") - .addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(windowSize)); - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); - assertThat( - e.toString(), - containsString( - "Rescore window [" - + windowSize - + "] is too large. It must " - + "be less than [" - + IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) - ) - ); - assertThat( - e.toString(), - containsString( - "This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting." - ) - ); - } -} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java index 67e460653245e..95b36311f6b8b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java @@ -6,24 +6,283 @@ * compatible open source license. */ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + package org.opensearch.search.simple; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest.RefreshPolicy; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.rescore.QueryRescorerBuilder; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; +import java.util.concurrent.ExecutionException; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.index.query.QueryBuilders.boolQuery; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.index.query.QueryBuilders.queryStringQuery; +import static org.opensearch.index.query.QueryBuilders.rangeQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.apache.lucene.search.TotalHits.Relation.EQUAL_TO; +import static org.apache.lucene.search.TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; + +public class SimpleSearchIT extends ParameterizedOpenSearchIntegTestCase { + + public SimpleSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + + public void testSearchNullIndex() { + expectThrows( + NullPointerException.class, + () -> client().prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() + ); + + expectThrows( + NullPointerException.class, + () -> client().prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", 
"XXX1")).get() + ); + + } + + public void testSearchRandomPreference() throws InterruptedException, ExecutionException { + createIndex("test"); + indexRandom( + true, + client().prepareIndex("test").setId("1").setSource("field", "value"), + client().prepareIndex("test").setId("2").setSource("field", "value"), + client().prepareIndex("test").setId("3").setSource("field", "value"), + client().prepareIndex("test").setId("4").setSource("field", "value"), + client().prepareIndex("test").setId("5").setSource("field", "value"), + client().prepareIndex("test").setId("6").setSource("field", "value") + ); + + int iters = scaledRandomIntBetween(10, 20); + for (int i = 0; i < iters; i++) { + String randomPreference = randomUnicodeOfLengthBetween(0, 4); + // randomPreference should not start with '_' (reserved for known preference types (e.g. _shards, _primary) + while (randomPreference.startsWith("_")) { + randomPreference = randomUnicodeOfLengthBetween(0, 4); + } + // id is not indexed, but lets see that we automatically convert to + SearchResponse searchResponse = client().prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .setPreference(randomPreference) + .get(); + assertHitCount(searchResponse, 6L); + + } + } + + public void testSimpleIp() throws Exception { + createIndex("test"); + + client().admin() + .indices() + .preparePutMapping("test") + .setSource( + XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("from") + .field("type", "ip") + .endObject() + .startObject("to") + .field("type", "ip") + .endObject() + .endObject() + .endObject() + .endObject() + ) + .get(); + + client().prepareIndex("test").setId("1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefreshPolicy(IMMEDIATE).get(); + + SearchResponse search = client().prepareSearch() + .setQuery(boolQuery().must(rangeQuery("from").lte("192.168.0.7")).must(rangeQuery("to").gte("192.168.0.7"))) + .get(); + + assertHitCount(search, 1L); + } + + public void testIpCidr() throws Exception { + createIndex("test"); + + client().admin() + .indices() + .preparePutMapping("test") + .setSource( + XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("ip") + .field("type", "ip") + .endObject() + .endObject() + .endObject() + .endObject() + ) + .get(); + ensureGreen(); + + client().prepareIndex("test").setId("1").setSource("ip", "192.168.0.1").get(); + client().prepareIndex("test").setId("2").setSource("ip", "192.168.0.2").get(); + client().prepareIndex("test").setId("3").setSource("ip", "192.168.0.3").get(); + client().prepareIndex("test").setId("4").setSource("ip", "192.168.1.4").get(); + client().prepareIndex("test").setId("5").setSource("ip", "2001:db8::ff00:42:8329").get(); + refresh(); + + SearchResponse search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(queryStringQuery("ip: 192.168.0.1")).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1/32"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.0/24"))).get(); + assertHitCount(search, 3L); -public class SimpleSearchIT extends OpenSearchIntegTestCase { + search = 
client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.0.0.0/8"))).get(); + assertHitCount(search, 4L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0.0.0.0/0"))).get(); + assertHitCount(search, 4L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::ff00:42:8329/128"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::/64"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "::/0"))).get(); + assertHitCount(search, 5L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.1.5/32"))).get(); + assertHitCount(search, 0L); + + assertFailures( + client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0/0/0/0/0"))), + RestStatus.BAD_REQUEST, + containsString("Expected [ip/prefix] but was [0/0/0/0/0]") + ); + } + + public void testSimpleId() { + createIndex("test"); + + client().prepareIndex("test").setId("XXX1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + // id is not indexed, but lets see that we automatically convert to + SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")).get(); + assertHitCount(searchResponse, 1L); + + searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")).get(); + assertHitCount(searchResponse, 1L); + } + + public void testSimpleDateRange() throws Exception { + createIndex("test"); + client().prepareIndex("test").setId("1").setSource("field", "2010-01-05T02:00").get(); + client().prepareIndex("test").setId("2").setSource("field", "2010-01-06T02:00").get(); + ensureGreen(); + refresh(); + SearchResponse searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d/d")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 2L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lte("2010-01-06T02:00")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 2L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lt("2010-01-06T02:00")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 1L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gt("2010-01-05T02:00").lt("2010-01-06T02:00")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 0L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.queryStringQuery("field:[2010-01-03||+2d TO 2010-01-04||+2d/d]")) + .get(); + assertHitCount(searchResponse, 2L); + } - // TODO: Move this test to ParameterizedSimpleSearchIT after https://github.com/opensearch-project/OpenSearch/issues/8371 public void testSimpleTerminateAfterCount() throws Exception { prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); ensureGreen(); @@ -40,13 +299,18 @@ public void testSimpleTerminateAfterCount() throws Exception { refresh(); SearchResponse searchResponse; + int size; for (int i = 1; i < max; 
i++) { + size = randomIntBetween(0, max); searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)) .setTerminateAfter(i) + .setSize(size) + .setTrackTotalHits(true) .get(); assertHitCount(searchResponse, i); assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(Math.min(i, size), searchResponse.getHits().getHits().length); } searchResponse = client().prepareSearch("test") @@ -57,4 +321,379 @@ public void testSimpleTerminateAfterCount() throws Exception { assertHitCount(searchResponse, max); assertFalse(searchResponse.isTerminatedEarly()); } + + public void testSimpleTerminateAfterTrackTotalHitsUpTo() throws Exception { + prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); + ensureGreen(); + int numDocs = 29; + List docbuilders = new ArrayList<>(numDocs); + + for (int i = 1; i <= numDocs; i++) { + String id = String.valueOf(i); + docbuilders.add(client().prepareIndex("test").setId(id).setSource("field", i)); + } + + indexRandom(true, docbuilders); + ensureGreen(); + refresh(); + + // size=0 is a special case where topDocsCollector is not added + int size = randomIntBetween(0, 1); + SearchResponse searchResponse; + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(10) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(GREATER_THAN_OR_EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHitsUpTo(10) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHits(true) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(numDocs * 2) + .setSize(size) + .setTrackTotalHits(true) + .get(); + assertFalse(searchResponse.isTerminatedEarly()); + assertEquals(numDocs, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(GREATER_THAN_OR_EQUAL_TO, 
searchResponse.getHits().getTotalHits().relation); + } + + public void testSimpleIndexSortEarlyTerminate() throws Exception { + prepareCreate("test").setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).put("index.sort.field", "rank") + ).setMapping("rank", "type=integer").get(); + ensureGreen(); + int max = randomIntBetween(3, 29); + List docbuilders = new ArrayList<>(max); + + for (int i = max - 1; i >= 0; i--) { + String id = String.valueOf(i); + docbuilders.add(client().prepareIndex("test").setId(id).setSource("rank", i)); + } + + indexRandom(true, docbuilders); + ensureGreen(); + refresh(); + + SearchResponse searchResponse; + for (int i = 1; i < max; i++) { + searchResponse = client().prepareSearch("test") + .addDocValueField("rank") + .setTrackTotalHits(false) + .addSort("rank", SortOrder.ASC) + .setSize(i) + .get(); + assertNull(searchResponse.getHits().getTotalHits()); + for (int j = 0; j < i; j++) { + assertThat(searchResponse.getHits().getAt(j).field("rank").getValue(), equalTo((long) j)); + } + } + } + + public void testInsaneFromAndSize() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertWindowFails(client().prepareSearch("idx").setFrom(Integer.MAX_VALUE)); + assertWindowFails(client().prepareSearch("idx").setSize(Integer.MAX_VALUE)); + } + + public void testTooLargeFromAndSize() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertWindowFails(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))); + assertWindowFails(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1)); + assertWindowFails( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + ); + } + + public void testLargeFromAndSizeSucceeds() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - 10).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); + assertHitCount( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2 - 1) + .get(), + 1 + ); + } + + public void testTooLargeFromAndSizeOkBySetting() throws Exception { + prepareCreate("idx").setSettings( + Settings.builder() + .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2) + ).get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); + assertHitCount( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .get(), + 1 + ); + } + + public 
void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception { + createIndex("idx"); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("idx") + .setSettings( + Settings.builder() + .put( + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), + IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2 + ) + ) + .get() + ); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); + assertHitCount( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .get(), + 1 + ); + } + + public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws Exception { + prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)).get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); + assertHitCount( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) + .get(), + 1 + ); + } + + public void testTooLargeRescoreWindow() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertRescoreWindowFails(Integer.MAX_VALUE); + assertRescoreWindowFails(IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + 1); + } + + public void testTooLargeRescoreOkBySetting() throws Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) + .get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testTooLargeRescoreOkByResultWindowSetting() throws Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + prepareCreate("idx").setSettings( + Settings.builder() + .put( + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), // Note that this is the RESULT window. 
+ defaultMaxWindow * 2 + ) + ).get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testTooLargeRescoreOkByDynamicSetting() throws Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + createIndex("idx"); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("idx") + .setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) + .get() + ); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testTooLargeRescoreOkByDynamicResultWindowSetting() throws Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + createIndex("idx"); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("idx") + .setSettings( + // Note that this is the RESULT window + Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), defaultMaxWindow * 2) + ) + .get() + ); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testQueryNumericFieldWithRegex() throws Exception { + assertAcked(prepareCreate("idx").setMapping("num", "type=integer")); + ensureGreen("idx"); + + try { + client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", "34")).get(); + fail("SearchPhaseExecutionException should have been thrown"); + } catch (SearchPhaseExecutionException ex) { + assertThat(ex.getRootCause().getMessage(), containsString("Can only use regexp queries on keyword and text fields")); + } + } + + public void testTermQueryBigInt() throws Exception { + prepareCreate("idx").setMapping("field", "type=keyword").get(); + ensureGreen("idx"); + + client().prepareIndex("idx") + .setId("1") + .setSource("{\"field\" : 80315953321748200608 }", MediaTypeRegistry.JSON) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); + + String queryJson = "{ \"field\" : { \"value\" : 80315953321748200608 } }"; + XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson); + parser.nextToken(); + TermQueryBuilder query = TermQueryBuilder.fromXContent(parser); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(query).get(); + assertEquals(1, searchResponse.getHits().getTotalHits().value); + } + + public void testTooLongRegexInRegexpQuery() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + int defaultMaxRegexLength = IndexSettings.MAX_REGEX_LENGTH_SETTING.get(Settings.EMPTY); + StringBuilder regexp = new StringBuilder(defaultMaxRegexLength); + while (regexp.length() <= defaultMaxRegexLength) { + regexp.append("]\\r\\\\]|\\\\.)*\\](?:(?:\\r\\n)?[\\t])*))*(?:,@(?:(?:\\r\\n)?[ \\t])*(?:[^()<>@,;:\\\\\".\\"); + } + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", 
regexp.toString())).get() + ); + assertThat( + e.getRootCause().getMessage(), + containsString( + "The length of regex [" + + regexp.length() + + "] used in the Regexp Query request has exceeded " + + "the allowed maximum of [" + + defaultMaxRegexLength + + "]. " + + "This maximum can be set by changing the [" + + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey() + + "] index level setting." + ) + ); + } + + private void assertWindowFails(SearchRequestBuilder search) { + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); + assertThat( + e.toString(), + containsString( + "Result window is too large, from + size must be less than or equal to: [" + + IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + ) + ); + assertThat(e.toString(), containsString("See the scroll api for a more efficient way to request large data sets")); + } + + private void assertRescoreWindowFails(int windowSize) { + SearchRequestBuilder search = client().prepareSearch("idx") + .addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(windowSize)); + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); + assertThat( + e.toString(), + containsString( + "Rescore window [" + + windowSize + + "] is too large. It must " + + "be less than [" + + IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + ) + ); + assertThat( + e.toString(), + containsString( + "This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting." + ) + ); + } } diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index 28931bb5a860f..960b46d68977b 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -896,6 +896,8 @@ public void evaluateRequestShouldUseConcurrentSearch() { && aggregations().factories() != null && !aggregations().factories().allFactoriesSupportConcurrentSearch()) { requestShouldUseConcurrentSearch.set(false); + } else if (terminateAfter != DEFAULT_TERMINATE_AFTER) { + requestShouldUseConcurrentSearch.set(false); } else { requestShouldUseConcurrentSearch.set(true); } From 2df9eed412c93c52ac404d6358bdc35685ee853f Mon Sep 17 00:00:00 2001 From: rishavz_sagar Date: Tue, 3 Oct 2023 16:58:31 +0530 Subject: [PATCH 12/26] Add unreferenced file cleanup count to merge stats (#10204) Signed-off-by: Rishav Sagar Co-authored-by: Rishav Sagar --- CHANGELOG.md | 1 + .../org/opensearch/index/engine/Engine.java | 12 +++++++++- .../opensearch/index/merge/MergeStats.java | 22 +++++++++++++++++-- .../opensearch/index/shard/IndexShard.java | 4 +++- .../index/engine/InternalEngineTests.java | 10 ++++++++- 5 files changed, 44 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9963eaef31d33..97a1386315dc7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -89,6 +89,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) - Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) - Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) +- Add unreferenced file 
cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204)) ### Dependencies - Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) diff --git a/server/src/main/java/org/opensearch/index/engine/Engine.java b/server/src/main/java/org/opensearch/index/engine/Engine.java index a8f2f60f8cf12..9b0e1b70a8e05 100644 --- a/server/src/main/java/org/opensearch/index/engine/Engine.java +++ b/server/src/main/java/org/opensearch/index/engine/Engine.java @@ -145,6 +145,7 @@ public abstract class Engine implements LifecycleAware, Closeable { protected final EngineConfig engineConfig; protected final Store store; protected final AtomicBoolean isClosed = new AtomicBoolean(false); + private final CounterMetric totalUnreferencedFileCleanUpsPerformed = new CounterMetric(); private final CountDownLatch closedLatch = new CountDownLatch(1); protected final EventListener eventListener; protected final ReentrantLock failEngineLock = new ReentrantLock(); @@ -267,6 +268,13 @@ protected final DocsStats docsStats(IndexReader indexReader) { return new DocsStats(numDocs, numDeletedDocs, sizeInBytes); } + /** + * Returns the unreferenced file cleanup count for this engine. + */ + public long unreferencedFileCleanUpsPerformed() { + return totalUnreferencedFileCleanUpsPerformed.count(); + } + /** * Performs the pre-closing checks on the {@link Engine}. * @@ -1340,7 +1348,9 @@ private void cleanUpUnreferencedFiles() { .setOpenMode(IndexWriterConfig.OpenMode.APPEND) ) ) { - // do nothing and close this will kick off IndexFileDeleter which will remove all unreferenced files. + // do nothing except increasing metric count and close this will kick off IndexFileDeleter which will + // remove all unreferenced files + totalUnreferencedFileCleanUpsPerformed.inc(); } catch (Exception ex) { logger.error("Error while deleting unreferenced file ", ex); } diff --git a/server/src/main/java/org/opensearch/index/merge/MergeStats.java b/server/src/main/java/org/opensearch/index/merge/MergeStats.java index 37fdca8871b18..fc5bac24a60d6 100644 --- a/server/src/main/java/org/opensearch/index/merge/MergeStats.java +++ b/server/src/main/java/org/opensearch/index/merge/MergeStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.merge; +import org.opensearch.Version; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -65,9 +66,9 @@ public class MergeStats implements Writeable, ToXContentFragment { private long totalBytesPerSecAutoThrottle; - public MergeStats() { + private long unreferencedFileCleanUpsPerformed; - } + public MergeStats() {} public MergeStats(StreamInput in) throws IOException { total = in.readVLong(); @@ -81,6 +82,9 @@ public MergeStats(StreamInput in) throws IOException { totalStoppedTimeInMillis = in.readVLong(); totalThrottledTimeInMillis = in.readVLong(); totalBytesPerSecAutoThrottle = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + unreferencedFileCleanUpsPerformed = in.readOptionalVLong(); + } } public void add( @@ -133,6 +137,7 @@ public void addTotals(MergeStats mergeStats) { this.totalSizeInBytes += mergeStats.totalSizeInBytes; this.totalStoppedTimeInMillis += mergeStats.totalStoppedTimeInMillis; this.totalThrottledTimeInMillis += mergeStats.totalThrottledTimeInMillis; + addUnreferencedFileCleanUpStats(mergeStats.unreferencedFileCleanUpsPerformed); if 
(this.totalBytesPerSecAutoThrottle == Long.MAX_VALUE || mergeStats.totalBytesPerSecAutoThrottle == Long.MAX_VALUE) { this.totalBytesPerSecAutoThrottle = Long.MAX_VALUE; } else { @@ -140,6 +145,14 @@ public void addTotals(MergeStats mergeStats) { } } + public void addUnreferencedFileCleanUpStats(long unreferencedFileCleanUpsPerformed) { + this.unreferencedFileCleanUpsPerformed += unreferencedFileCleanUpsPerformed; + } + + public long getUnreferencedFileCleanUpsPerformed() { + return this.unreferencedFileCleanUpsPerformed; + } + /** * The total number of merges executed. */ @@ -240,6 +253,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.TOTAL_THROTTLE_BYTES_PER_SEC).value(new ByteSizeValue(totalBytesPerSecAutoThrottle).toString()); } builder.field(Fields.TOTAL_THROTTLE_BYTES_PER_SEC_IN_BYTES, totalBytesPerSecAutoThrottle); + builder.field(Fields.UNREFERENCED_FILE_CLEANUPS_PERFORMED, unreferencedFileCleanUpsPerformed); builder.endObject(); return builder; } @@ -267,6 +281,7 @@ static final class Fields { static final String TOTAL_SIZE_IN_BYTES = "total_size_in_bytes"; static final String TOTAL_THROTTLE_BYTES_PER_SEC_IN_BYTES = "total_auto_throttle_in_bytes"; static final String TOTAL_THROTTLE_BYTES_PER_SEC = "total_auto_throttle"; + static final String UNREFERENCED_FILE_CLEANUPS_PERFORMED = "unreferenced_file_cleanups_performed"; } @Override @@ -282,5 +297,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(totalStoppedTimeInMillis); out.writeVLong(totalThrottledTimeInMillis); out.writeVLong(totalBytesPerSecAutoThrottle); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalVLong(unreferencedFileCleanUpsPerformed); + } } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 5ce066b156775..bc9d839624740 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -1383,7 +1383,9 @@ public MergeStats mergeStats() { if (engine == null) { return new MergeStats(); } - return engine.getMergeStats(); + final MergeStats mergeStats = engine.getMergeStats(); + mergeStats.addUnreferencedFileCleanUpStats(engine.unreferencedFileCleanUpsPerformed()); + return mergeStats; } public SegmentsStats segmentStats(boolean includeSegmentFileSizes, boolean includeUnloadedSegments) { diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index e3d77d45861ac..efa13438cffcf 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -233,7 +233,6 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -3334,6 +3333,9 @@ public void onFailedEngine(String reason, Exception e) { ); assertTrue(cleanupCompleted.await(10, TimeUnit.SECONDS)); + // Cleanup count will be incremented whenever cleanup is performed correctly. 
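+        // (The counter lives on the Engine itself: cleanUpUnreferencedFiles() increments it once the
+        // cleanup IndexWriter opens successfully, and closing that writer kicks off IndexFileDeleter.)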
+ long unreferencedFileCleanUpsPerformed = engine.unreferencedFileCleanUpsPerformed(); + assertThat(unreferencedFileCleanUpsPerformed, equalTo(1L)); } catch (Exception ex) { throw new AssertionError(ex); } @@ -3445,6 +3447,9 @@ public void onFailedEngine(String reason, Exception e) { ); assertTrue(cleanupCompleted.await(10, TimeUnit.SECONDS)); + // Cleanup count will not be incremented whenever cleanup is disabled. + long unreferencedFileCleanUpsPerformed = engine.unreferencedFileCleanUpsPerformed(); + assertThat(unreferencedFileCleanUpsPerformed, equalTo(0L)); } catch (Exception ex) { throw new AssertionError(ex); } @@ -3549,6 +3554,9 @@ public void onFailedEngine(String reason, Exception e) { ); assertTrue(cleanupCompleted.await(10, TimeUnit.SECONDS)); + // Cleanup count will not be incremented whenever there is some issue with cleanup. + long unreferencedFileCleanUpsPerformed = engine.unreferencedFileCleanUpsPerformed(); + assertThat(unreferencedFileCleanUpsPerformed, equalTo(0L)); } catch (Exception ex) { throw new AssertionError(ex); } From 20989755b2660ce9674fe2d2d95b125a6ffbfc7d Mon Sep 17 00:00:00 2001 From: Jongwoo Han Date: Tue, 3 Oct 2023 20:51:34 +0900 Subject: [PATCH 13/26] Replace deprecated command with environment file (#10289) Signed-off-by: Jongwoo Han --- .github/workflows/lucene-snapshots.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lucene-snapshots.yml b/.github/workflows/lucene-snapshots.yml index c2a2cedaaefb4..76981276fe085 100644 --- a/.github/workflows/lucene-snapshots.yml +++ b/.github/workflows/lucene-snapshots.yml @@ -38,7 +38,7 @@ jobs: - name: Set hash working-directory: ./lucene run: | - echo "::set-output name=REVISION::$(git rev-parse --short HEAD)" + echo "REVISION=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT id: version - name: Initialize gradle settings From fe8068208c6243f1d6b1c22607461d6aa6f6893e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 10:27:53 -0400 Subject: [PATCH 14/26] Bump peter-evans/create-pull-request from 3 to 5 (#10301) * Bump peter-evans/create-pull-request from 3 to 5 Bumps [peter-evans/create-pull-request](https://github.com/peter-evans/create-pull-request) from 3 to 5. - [Release notes](https://github.com/peter-evans/create-pull-request/releases) - [Commits](https://github.com/peter-evans/create-pull-request/compare/v3...v5) --- updated-dependencies: - dependency-name: peter-evans/create-pull-request dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- .github/workflows/version.yml | 6 +++--- CHANGELOG.md | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index df785bcc70014..a20c671c137b2 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -59,7 +59,7 @@ jobs: sed -i "s/CURRENT = $CURRENT_VERSION_UNDERSCORE;/CURRENT = $NEXT_VERSION_UNDERSCORE;/g" libs/core/src/main/java/org/opensearch/Version.java - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v5 with: token: ${{ steps.github_app_token.outputs.token }} base: ${{ env.BASE }} @@ -86,7 +86,7 @@ jobs: sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v5 with: token: ${{ steps.github_app_token.outputs.token }} base: ${{ env.BASE_X }} @@ -113,7 +113,7 @@ jobs: sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v5 with: token: ${{ steps.github_app_token.outputs.token }} base: main diff --git a/CHANGELOG.md b/CHANGELOG.md index 97a1386315dc7..a6b9ff4eacfa2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -112,6 +112,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump Lucene from 9.7.0 to 9.8.0 ([#10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) - Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) - Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) +- Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301)) ### Changed - Add instrumentation in rest and network layer. 
([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) @@ -139,4 +140,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x \ No newline at end of file From e7909fc45da3b4e60ea18a0b37563050d3af1d54 Mon Sep 17 00:00:00 2001 From: Bhumika Saini Date: Tue, 3 Oct 2023 20:07:08 +0530 Subject: [PATCH 15/26] Add TODO for updating public documentation (#10242) Signed-off-by: Bhumika Saini --- .github/pull_request_template.md | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 69616e533d1ed..cd7c1bb980eec 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -19,6 +19,7 @@ Resolves #[Issue number to be closed when this PR is merged] - [ ] New functionality has javadoc added - [ ] Commits are signed per the DCO using --signoff - [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) +- [ ] GitHub issue/PR created in [OpenSearch documentation repo](https://github.com/opensearch-project/documentation-website) for the required public documentation changes (#[Issue/PR number]) By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. For more information on following Developer Certificate of Origin and signing off your commits, please check [here](https://github.com/opensearch-project/OpenSearch/blob/main/CONTRIBUTING.md#developer-certificate-of-origin). From 5aa18636706597351aacfa7fc51364cd54ba2c04 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 11:11:27 -0400 Subject: [PATCH 16/26] Bump com.netflix.nebula.ospackage-base from 11.4.0 to 11.5.0 in /distribution/packages (#10295) * Bump com.netflix.nebula.ospackage-base in /distribution/packages Bumps com.netflix.nebula.ospackage-base from 11.4.0 to 11.5.0. --- updated-dependencies: - dependency-name: com.netflix.nebula.ospackage-base dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Andriy Redko Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Andriy Redko --- CHANGELOG.md | 9 +++++---- distribution/packages/build.gradle | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a6b9ff4eacfa2..8a37866012ba1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -106,10 +106,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.4 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.0 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208)) - Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) -- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump Lucene from 9.7.0 to 9.8.0 ([#10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295)) - Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) - Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) - Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301)) diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 262ad6c802bbb..cb05661dc74a4 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.4.0" + id "com.netflix.nebula.ospackage-base" version "11.5.0" } void addProcessFilesTask(String type, boolean jdk) { From bddf0d3762f55d959ae76f3dea06259e7e795ebe Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 3 Oct 2023 16:07:34 -0400 Subject: [PATCH 17/26] Fix the HttpChannel trace termination in case of exceptions (#10325) Signed-off-by: Andriy Redko --- .../org/opensearch/telemetry/tracing/DefaultTracer.java | 5 ++++- .../org/opensearch/http/AbstractHttpServerTransport.java | 1 + server/src/main/java/org/opensearch/http/HttpChannel.java | 5 +++++ .../telemetry/tracing/channels/TraceableHttpChannel.java | 7 +++++++ 
.../tracing/handler/TraceableTransportResponseHandler.java | 1 +
 .../org/opensearch/transport/TransportResponseHandler.java |  2 +-
 6 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java
index d3c28b3a9cb5e..79b7e4aca6c2f 100644
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java
@@ -25,7 +25,10 @@
  */
 @InternalApi
 class DefaultTracer implements Tracer {
-    static final String THREAD_NAME = "th_name";
+    /**
+     * Current thread name.
+     */
+    static final String THREAD_NAME = "thread.name";
 
     private final TracingTelemetry tracingTelemetry;
     private final TracerContextStorage<String, Span> tracerContextStorage;
diff --git a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java
index ed44102d0abe4..b8f8abb6c2c23 100644
--- a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java
+++ b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java
@@ -298,6 +298,7 @@ static int resolvePublishPort(Settings settings, List<TransportAddress> boundAdd
     }
 
     public void onException(HttpChannel channel, Exception e) {
+        channel.handleException(e);
         if (lifecycle.started() == false) {
             // just close and ignore - we are already stopped and just need to make sure we release all resources
             CloseableChannel.closeChannel(channel);
diff --git a/server/src/main/java/org/opensearch/http/HttpChannel.java b/server/src/main/java/org/opensearch/http/HttpChannel.java
index 99aaed23c69b8..6dcdaf9034413 100644
--- a/server/src/main/java/org/opensearch/http/HttpChannel.java
+++ b/server/src/main/java/org/opensearch/http/HttpChannel.java
@@ -43,6 +43,11 @@
  * @opensearch.internal
  */
 public interface HttpChannel extends CloseableChannel {
+    /**
+     * Notifies the HTTP channel that an exception has occurred and that the response may not be sent (for example, on timeout).
+     * @param ex the exception being raised
+     */
+    default void handleException(Exception ex) {}
 
     /**
      * Sends an http response to the channel.
The listener will be executed once the send process has been diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java index 9229d334dea01..03848e8e58207 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java @@ -56,6 +56,13 @@ public static HttpChannel create(HttpChannel delegate, Span span, Tracer tracer) } } + @Override + public void handleException(Exception ex) { + span.addEvent("The HttpChannel was closed without sending the response"); + span.setError(ex); + span.endSpan(); + } + @Override public void close() { delegate.close(); diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java b/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java index abddfcc6cebc1..538bf82a1dbec 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java @@ -101,6 +101,7 @@ public void handleRejection(Exception exp) { try (SpanScope scope = tracer.withSpanInScope(span)) { delegate.handleRejection(exp); } finally { + span.setError(exp); span.endSpan(); } } diff --git a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java index 90e94e52515ce..8992af18edb48 100644 --- a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java +++ b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java @@ -57,7 +57,7 @@ public interface TransportResponseHandler extends W * It should be used to clear up the resources held by the {@link TransportResponseHandler}. 
* @param exp exception
      */
-    default void handleRejection(Exception exp) {};
+    default void handleRejection(Exception exp) {}
 
     default <Q extends TransportResponse> TransportResponseHandler<Q> wrap(Function<Q, T> converter, Writeable.Reader<Q> reader) {
         final TransportResponseHandler<T> self = this;

From d5a95b81045828cfbd4926a10685eb03a83457c0 Mon Sep 17 00:00:00 2001
From: rishavz_sagar
Date: Wed, 4 Oct 2023 03:40:21 +0530
Subject: [PATCH 18/26] Changing unreferenced file cleanup stats based MergeStats metrics version to 2.11 (#10336)

Signed-off-by: Rishav Sagar
Co-authored-by: Rishav Sagar
---
 .../src/main/java/org/opensearch/index/merge/MergeStats.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/src/main/java/org/opensearch/index/merge/MergeStats.java b/server/src/main/java/org/opensearch/index/merge/MergeStats.java
index fc5bac24a60d6..a284cec247ff1 100644
--- a/server/src/main/java/org/opensearch/index/merge/MergeStats.java
+++ b/server/src/main/java/org/opensearch/index/merge/MergeStats.java
@@ -82,7 +82,7 @@ public MergeStats(StreamInput in) throws IOException {
         totalStoppedTimeInMillis = in.readVLong();
         totalThrottledTimeInMillis = in.readVLong();
         totalBytesPerSecAutoThrottle = in.readVLong();
-        if (in.getVersion().onOrAfter(Version.V_3_0_0)) {
+        if (in.getVersion().onOrAfter(Version.V_2_11_0)) {
             unreferencedFileCleanUpsPerformed = in.readOptionalVLong();
         }
     }
@@ -297,7 +297,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeVLong(totalStoppedTimeInMillis);
         out.writeVLong(totalThrottledTimeInMillis);
         out.writeVLong(totalBytesPerSecAutoThrottle);
-        if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
+        if (out.getVersion().onOrAfter(Version.V_2_11_0)) {
             out.writeOptionalVLong(unreferencedFileCleanUpsPerformed);
         }
     }

From 7159e2e36ab00906ae83c50001fce41347ac514b Mon Sep 17 00:00:00 2001
From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com>
Date: Wed, 4 Oct 2023 14:53:53 +0530
Subject: [PATCH 19/26] Add node id to segment and translog metadata (#10229)

* Adding node id to segment and translog metadata

Adding validation to identify multiple writers to same primary term and
generation in remote store

Signed-off-by: Gaurav Bafna

* Fix bug to find node id

Signed-off-by: Gaurav Bafna

* Spotless fix

Signed-off-by: Gaurav Bafna

* Moving node id before uuid as it was interfering with _ character split

Signed-off-by: Gaurav Bafna

* Removing uuid from segment metadata file

Signed-off-by: Gaurav Bafna

* simplifying the detection logic

Signed-off-by: Gaurav Bafna

* PR comments

Signed-off-by: Gaurav Bafna

* Addressing PR comments

Signed-off-by: Gaurav Bafna

* Adding translog gen to remote segment metadata fn as well

Signed-off-by: Gaurav Bafna

* spotless fix

Signed-off-by: Gaurav Bafna

* reducing METADATA_FILES_TO_FETCH to 10

Signed-off-by: Gaurav Bafna

* adding missing import

Signed-off-by: Gaurav Bafna

---------

Signed-off-by: Gaurav Bafna
---
 .../opensearch/index/shard/IndexShardIT.java  |  14 ++-
 .../org/opensearch/index/IndexService.java    |   3 +-
 .../index/remote/RemoteStoreUtils.java        |  33 +++++
 .../opensearch/index/shard/IndexShard.java    |   9 +-
 .../shard/RemoteStoreRefreshListener.java     |   3 +-
 .../store/RemoteSegmentStoreDirectory.java    |  33 ++++-
 .../index/translog/RemoteFsTranslog.java      |   3 +-
 .../index/translog/TranslogConfig.java        |  19 ++-
 .../translog/TruncateTranslogAction.java      |   3 +-
 .../TranslogCheckpointTransferSnapshot.java   |  21 +++-
 .../transfer/TranslogTransferManager.java     |  12 +-
 .../transfer/TranslogTransferMetadata.java    |  37 +++++-
 .../index/engine/InternalEngineTests.java
| 8 +- .../index/remote/RemoteStoreUtilsTests.java | 119 ++++++++++++++++++ .../index/shard/RefreshListenersTests.java | 3 +- .../RemoteStoreRefreshListenerTests.java | 10 +- ...moteSegmentStoreDirectoryFactoryTests.java | 10 +- .../RemoteSegmentStoreDirectoryTests.java | 87 +++++++++---- .../InternalTranslogManagerTests.java | 14 +-- .../index/translog/LocalTranslogTests.java | 8 +- .../index/translog/RemoteFsTranslogTests.java | 8 +- .../translog/TranslogManagerTestCase.java | 2 +- .../TranslogTransferManagerTests.java | 49 +++++++- .../index/engine/EngineTestCase.java | 12 +- .../index/shard/IndexShardTestCase.java | 3 +- 25 files changed, 445 insertions(+), 78 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index bb08b19df765b..07f85496f13cf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -650,7 +650,15 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul } } }; - final IndexShard newShard = newIndexShard(indexService, shard, wrapper, getInstanceFromNode(CircuitBreakerService.class), listener); + NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); + final IndexShard newShard = newIndexShard( + indexService, + shard, + wrapper, + getInstanceFromNode(CircuitBreakerService.class), + env.nodeId(), + listener + ); shardRef.set(newShard); recoverShard(newShard); @@ -674,6 +682,7 @@ public static final IndexShard newIndexShard( final IndexShard shard, CheckedFunction wrapper, final CircuitBreakerService cbs, + final String nodeId, final IndexingOperationListener... listeners ) throws IOException { ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry()); @@ -702,7 +711,8 @@ public static final IndexShard newIndexShard( SegmentReplicationCheckpointPublisher.EMPTY, null, null, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + nodeId ); } diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index ca0cc307e460b..fdda8d4ce2497 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -519,7 +519,8 @@ public synchronized IndexShard createShard( this.indexSettings.isSegRepEnabled() ? 
checkpointPublisher : null,
             remoteStore,
             remoteStoreStatsTrackerFactory,
-            clusterRemoteTranslogBufferIntervalSupplier
+            clusterRemoteTranslogBufferIntervalSupplier,
+            nodeEnv.nodeId()
         );
         eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created");
         eventListener.afterIndexShardCreated(indexShard);
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java
index 114d07589b0c0..0ca9e0209c5ec 100644
--- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java
+++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java
@@ -8,7 +8,13 @@
 
 package org.opensearch.index.remote;
 
+import org.opensearch.common.collect.Tuple;
+
 import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
 
 /**
  * Utils for remote store
@@ -69,4 +75,31 @@ public static String getSegmentName(String filename) {
 
         return filename.substring(0, endIdx);
     }
+
+    /**
+     *
+     * @param mdFiles List of segment/translog metadata files
+     * @param fn Function to extract PrimaryTerm_Generation and Node Id from the metadata file name.
+     *           fn returns null if the node id is not part of the file name
+     */
+    public static void verifyNoMultipleWriters(List<String> mdFiles, Function<String, Tuple<String, String>> fn) {
+        Map<String, String> nodesByPrimaryTermAndGen = new HashMap<>();
+        mdFiles.forEach(mdFile -> {
+            Tuple<String, String> nodeIdByPrimaryTermAndGen = fn.apply(mdFile);
+            if (nodeIdByPrimaryTermAndGen != null) {
+                if (nodesByPrimaryTermAndGen.containsKey(nodeIdByPrimaryTermAndGen.v1())
+                    && (!nodesByPrimaryTermAndGen.get(nodeIdByPrimaryTermAndGen.v1()).equals(nodeIdByPrimaryTermAndGen.v2()))) {
+                    throw new IllegalStateException(
+                        "Multiple metadata files from different nodes ["
+                            + nodesByPrimaryTermAndGen.get(nodeIdByPrimaryTermAndGen.v1())
+                            + "] and ["
+                            + nodeIdByPrimaryTermAndGen.v2()
+                            + "] having the same primary term and generation detected"
+                    );
+                }
+                nodesByPrimaryTermAndGen.put(nodeIdByPrimaryTermAndGen.v1(), nodeIdByPrimaryTermAndGen.v2());
+            }
+        });
+    }
+
 }
diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
index bc9d839624740..5818b2d866854 100644
--- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
@@ -367,7 +367,8 @@ public IndexShard(
         @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher,
         @Nullable final Store remoteStore,
         final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory,
-        final Supplier<TimeValue> clusterRemoteTranslogBufferIntervalSupplier
+        final Supplier<TimeValue> clusterRemoteTranslogBufferIntervalSupplier,
+        final String nodeId
     ) throws IOException {
         super(shardRouting.shardId(), indexSettings);
         assert shardRouting.initializing();
@@ -413,7 +414,7 @@ public IndexShard(
         logger.debug("state: [CREATED]");
 
         this.checkIndexOnStartup = indexSettings.getValue(IndexSettings.INDEX_CHECK_ON_STARTUP);
-        this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays);
+        this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays, nodeId);
         final String aId = shardRouting.allocationId().getId();
         final long primaryTerm = indexSettings.getIndexMetadata().primaryTerm(shardId.id());
         this.pendingPrimaryTerm = primaryTerm;
@@ -556,6 +557,10 @@ protected RemoteStoreStatsTrackerFactory getRemoteStoreStatsTrackerFactory() {
         return
remoteStoreStatsTrackerFactory; } + public String getNodeId() { + return translogConfig.getNodeId(); + } + @Override public void updateShardState( final ShardRouting newRouting, diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 695c01367171a..698e61f6f7a09 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -346,7 +346,8 @@ void uploadMetadata(Collection localSegmentsPostRefresh, SegmentInfos se segmentInfosSnapshot, storeDirectory, translogFileGeneration, - replicationCheckpoint + replicationCheckpoint, + indexShard.getNodeId() ); } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index b23d2d7d0a3f8..21a84f2b8c903 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -25,6 +25,7 @@ import org.apache.lucene.util.Version; import org.opensearch.common.UUIDs; import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.VersionedCodecStreamWrapper; import org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.store.ByteArrayIndexInput; @@ -114,6 +115,8 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement private final AtomicLong metadataUploadCounter = new AtomicLong(0); + public static final int METADATA_FILES_TO_FETCH = 10; + public RemoteSegmentStoreDirectory( RemoteDirectory remoteDataDirectory, RemoteDirectory remoteMetadataDirectory, @@ -187,9 +190,11 @@ public RemoteSegmentMetadata readLatestMetadataFile() throws IOException { List metadataFiles = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ); + RemoteStoreUtils.verifyNoMultipleWriters(metadataFiles, MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen); + if (metadataFiles.isEmpty() == false) { String latestMetadataFile = metadataFiles.get(0); logger.trace("Reading latest Metadata file {}", latestMetadataFile); @@ -306,12 +311,13 @@ static String getMetadataFilePrefixForCommit(long primaryTerm, long generation) } // Visible for testing - static String getMetadataFilename( + public static String getMetadataFilename( long primaryTerm, long generation, long translogGeneration, long uploadCounter, - int metadataVersion + int metadataVersion, + String nodeId ) { return String.join( SEPARATOR, @@ -320,6 +326,7 @@ static String getMetadataFilename( RemoteStoreUtils.invertLong(generation), RemoteStoreUtils.invertLong(translogGeneration), RemoteStoreUtils.invertLong(uploadCounter), + nodeId, RemoteStoreUtils.invertLong(System.currentTimeMillis()), String.valueOf(metadataVersion) ); @@ -334,6 +341,19 @@ static long getPrimaryTerm(String[] filenameTokens) { static long getGeneration(String[] filenameTokens) { return RemoteStoreUtils.invertLong(filenameTokens[2]); } + + public static Tuple getNodeIdByPrimaryTermAndGen(String filename) { + String[] tokens = filename.split(SEPARATOR); + if (tokens.length < 8) { + // For versions < 2.11, we don't have node id. 
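+                // A 2.11+ name, as built by getMetadataFilename above, joins eight tokens with SEPARATOR:
+                // METADATA_PREFIX, inv(primaryTerm), inv(generation), inv(translogGeneration),
+                // inv(uploadCounter), nodeId, inv(timestamp), metadataVersion.
+                // Shorter names predate the node id and cannot be attributed to a writer.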
+ return null; + } + String primaryTermAndGen = String.join(SEPARATOR, tokens[1], tokens[2], tokens[3]); + + String nodeId = tokens[5]; + return new Tuple<>(primaryTermAndGen, nodeId); + } + } /** @@ -593,6 +613,7 @@ public boolean containsFile(String localFilename, String checksum) { * @param storeDirectory instance of local directory to temporarily create metadata file before upload * @param translogGeneration translog generation * @param replicationCheckpoint ReplicationCheckpoint of primary shard + * @param nodeId node id * @throws IOException in case of I/O error while uploading the metadata file */ public void uploadMetadata( @@ -600,7 +621,8 @@ public void uploadMetadata( SegmentInfos segmentInfosSnapshot, Directory storeDirectory, long translogGeneration, - ReplicationCheckpoint replicationCheckpoint + ReplicationCheckpoint replicationCheckpoint, + String nodeId ) throws IOException { synchronized (this) { String metadataFilename = MetadataFilenameUtils.getMetadataFilename( @@ -608,7 +630,8 @@ public void uploadMetadata( segmentInfosSnapshot.getGeneration(), translogGeneration, metadataUploadCounter.incrementAndGet(), - RemoteSegmentMetadata.CURRENT_VERSION + RemoteSegmentMetadata.CURRENT_VERSION, + nodeId ); try { try (IndexOutput indexOutput = storeDirectory.createOutput(metadataFilename, IOContext.DEFAULT)) { diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 004d1bfdca36d..857d90e2e2ac2 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -327,7 +327,8 @@ private boolean upload(Long primaryTerm, Long generation) throws IOException { generation, location, readers, - Translog::getCommitCheckpointFileName + Translog::getCommitCheckpointFileName, + config.getNodeId() ).build() ) { return translogTransferManager.transferSnapshot( diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java b/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java index cac88bee82a73..6e75ebd847b5e 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java @@ -56,6 +56,7 @@ public final class TranslogConfig { private final ShardId shardId; private final Path translogPath; private final ByteSizeValue bufferSize; + private final String nodeId; /** * Creates a new TranslogConfig instance @@ -64,16 +65,24 @@ public final class TranslogConfig { * @param indexSettings the index settings used to set internal variables * @param bigArrays a bigArrays instance used for temporarily allocating write operations */ - public TranslogConfig(ShardId shardId, Path translogPath, IndexSettings indexSettings, BigArrays bigArrays) { - this(shardId, translogPath, indexSettings, bigArrays, DEFAULT_BUFFER_SIZE); + public TranslogConfig(ShardId shardId, Path translogPath, IndexSettings indexSettings, BigArrays bigArrays, String nodeId) { + this(shardId, translogPath, indexSettings, bigArrays, DEFAULT_BUFFER_SIZE, nodeId); } - TranslogConfig(ShardId shardId, Path translogPath, IndexSettings indexSettings, BigArrays bigArrays, ByteSizeValue bufferSize) { + TranslogConfig( + ShardId shardId, + Path translogPath, + IndexSettings indexSettings, + BigArrays bigArrays, + ByteSizeValue bufferSize, + String nodeId + ) { this.bufferSize = bufferSize; this.indexSettings = 
indexSettings; this.shardId = shardId; this.translogPath = translogPath; this.bigArrays = bigArrays; + this.nodeId = nodeId; } /** @@ -110,4 +119,8 @@ public Path getTranslogPath() { public ByteSizeValue getBufferSize() { return bufferSize; } + + public String getNodeId() { + return nodeId; + } } diff --git a/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java index 0d85123b60c75..25fcdc614172a 100644 --- a/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java +++ b/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java @@ -194,7 +194,8 @@ private boolean isTranslogClean(ShardPath shardPath, ClusterState clusterState, shardPath.getShardId(), translogPath, indexSettings, - BigArrays.NON_RECYCLING_INSTANCE + BigArrays.NON_RECYCLING_INSTANCE, + "" ); long primaryTerm = indexSettings.getIndexMetadata().primaryTerm(shardPath.getShardId().id()); // We open translog to check for corruption, do not clean anything. diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java index 10dec13c81e1a..fb78731246a07 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java @@ -40,11 +40,14 @@ public class TranslogCheckpointTransferSnapshot implements TransferSnapshot, Clo private final long primaryTerm; private long minTranslogGeneration; - TranslogCheckpointTransferSnapshot(long primaryTerm, long generation, int size) { + private String nodeId; + + TranslogCheckpointTransferSnapshot(long primaryTerm, long generation, int size, String nodeId) { translogCheckpointFileInfoTupleSet = new HashSet<>(size); this.size = size; this.generation = generation; this.primaryTerm = primaryTerm; + this.nodeId = nodeId; } private void add(TranslogFileSnapshot translogFileSnapshot, CheckpointFileSnapshot checkPointFileSnapshot) { @@ -63,7 +66,13 @@ public Set getTranslogFileSnapshots() { @Override public TranslogTransferMetadata getTranslogTransferMetadata() { - return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, translogCheckpointFileInfoTupleSet.size() * 2); + return new TranslogTransferMetadata( + primaryTerm, + generation, + minTranslogGeneration, + translogCheckpointFileInfoTupleSet.size() * 2, + nodeId + ); } @Override @@ -110,19 +119,22 @@ public static class Builder { private final List readers; private final Function checkpointGenFileNameMapper; private final Path location; + private final String nodeId; public Builder( long primaryTerm, long generation, Path location, List readers, - Function checkpointGenFileNameMapper + Function checkpointGenFileNameMapper, + String nodeId ) { this.primaryTerm = primaryTerm; this.generation = generation; this.readers = readers; this.checkpointGenFileNameMapper = checkpointGenFileNameMapper; this.location = location; + this.nodeId = nodeId; } public TranslogCheckpointTransferSnapshot build() throws IOException { @@ -134,7 +146,8 @@ public TranslogCheckpointTransferSnapshot build() throws IOException { TranslogCheckpointTransferSnapshot translogTransferSnapshot = new TranslogCheckpointTransferSnapshot( primaryTerm, generation, - readers.size() + readers.size(), + nodeId ); for 
(TranslogReader reader : readers) { final long readerGeneration = reader.getGeneration(); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index d7e50cdcf32e4..d988b8a6254ff 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -24,6 +24,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.transfer.listener.TranslogTransferListener; @@ -64,6 +65,8 @@ public class TranslogTransferManager { private static final long TRANSFER_TIMEOUT_IN_MILLIS = 30000; + private static final int METADATA_FILES_TO_FETCH = 10; + private final Logger logger; private final static String METADATA_DIR = "metadata"; private final static String DATA_DIR = "data"; @@ -275,6 +278,10 @@ public TranslogTransferMetadata readMetadata() throws IOException { LatchedActionListener> latchedActionListener = new LatchedActionListener<>( ActionListener.wrap(blobMetadataList -> { if (blobMetadataList.isEmpty()) return; + RemoteStoreUtils.verifyNoMultipleWriters( + blobMetadataList.stream().map(BlobMetadata::name).collect(Collectors.toList()), + TranslogTransferMetadata::getNodeIdByPrimaryTermAndGen + ); String filename = blobMetadataList.get(0).name(); boolean downloadStatus = false; long downloadStartTime = System.nanoTime(), bytesToRead = 0; @@ -295,6 +302,9 @@ public TranslogTransferMetadata readMetadata() throws IOException { } } }, e -> { + if (e instanceof RuntimeException) { + throw (RuntimeException) e; + } logger.error(() -> new ParameterizedMessage("Exception while listing metadata files"), e); exceptionSetOnce.set((IOException) e); }), @@ -305,7 +315,7 @@ public TranslogTransferMetadata readMetadata() throws IOException { transferService.listAllInSortedOrder( remoteMetadataTransferPath, TranslogTransferMetadata.METADATA_PREFIX, - 1, + METADATA_FILES_TO_FETCH, latchedActionListener ); latch.await(); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java index a8b3404d3f2ce..07ec9160db1e3 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java @@ -9,6 +9,7 @@ package org.opensearch.index.translog.transfer; import org.opensearch.common.SetOnce; +import org.opensearch.common.collect.Tuple; import org.opensearch.index.remote.RemoteStoreUtils; import java.util.Arrays; @@ -30,7 +31,7 @@ public class TranslogTransferMetadata { private final long minTranslogGeneration; - private int count; + private final int count; private final SetOnce> generationToPrimaryTermMapper = new SetOnce<>(); @@ -46,12 +47,22 @@ public class TranslogTransferMetadata { private final long createdAt; - public TranslogTransferMetadata(long primaryTerm, long generation, long minTranslogGeneration, int count) { + private final String nodeId; + + public TranslogTransferMetadata(long 
primaryTerm, long generation, long minTranslogGeneration, int count, String nodeId) { this.primaryTerm = primaryTerm; this.generation = generation; this.minTranslogGeneration = minTranslogGeneration; this.count = count; this.createdAt = System.currentTimeMillis(); + this.nodeId = nodeId; + } + + /* + Used only at the time of download. Since details are read from the content, nodeId is not available. + */ + public TranslogTransferMetadata(long primaryTerm, long generation, long minTranslogGeneration, int count) { + this(primaryTerm, generation, minTranslogGeneration, count, ""); } public long getPrimaryTerm() { @@ -89,11 +100,33 @@ public String getFileName() { RemoteStoreUtils.invertLong(primaryTerm), RemoteStoreUtils.invertLong(generation), RemoteStoreUtils.invertLong(createdAt), + nodeId, String.valueOf(CURRENT_VERSION) ) ); } + public static Tuple<Tuple<Long, Long>, String> getNodeIdByPrimaryTermAndGeneration(String filename) { + String[] tokens = filename.split(METADATA_SEPARATOR); + if (tokens.length < 6) { + // For versions < 2.11, we don't have node id + return null; + } + return new Tuple<>(new Tuple<>(RemoteStoreUtils.invertLong(tokens[1]), RemoteStoreUtils.invertLong(tokens[2])), tokens[4]); + } + + public static Tuple<String, String> getNodeIdByPrimaryTermAndGen(String filename) { + String[] tokens = filename.split(METADATA_SEPARATOR); + if (tokens.length < 6) { + // For versions < 2.11, we don't have node id. + return null; + } + String primaryTermAndGen = String.join(METADATA_SEPARATOR, tokens[1], tokens[2]); + + String nodeId = tokens[4]; + return new Tuple<>(primaryTermAndGen, nodeId); + } + @Override public int hashCode() { return Objects.hash(primaryTerm, generation); diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index efa13438cffcf..b8bb73bb89a82 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -4005,7 +4005,7 @@ public void testRecoverFromForeignTranslog() throws IOException { final Path badTranslogLog = createTempDir(); final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); Translog translog = new LocalTranslog( - new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED, @@ -4022,7 +4022,8 @@ public void testRecoverFromForeignTranslog() throws IOException { shardId, translog.location(), config.getIndexSettings(), - BigArrays.NON_RECYCLING_INSTANCE + BigArrays.NON_RECYCLING_INSTANCE, + "" ); EngineConfig brokenConfig = new EngineConfig.Builder().shardId(shardId) @@ -7711,7 +7712,8 @@ public void testNotWarmUpSearcherInEngineCtor() throws Exception { config.getTranslogConfig().getShardId(), createTempDir(), config.getTranslogConfig().getIndexSettings(), - config.getTranslogConfig().getBigArrays() + config.getTranslogConfig().getBigArrays(), + "" ); EngineConfig configWithWarmer = new EngineConfig.Builder().shardId(config.getShardId()) .threadPool(config.getThreadPool()) diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index 9afa75dd601b2..181c846d394a0
100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -8,10 +8,85 @@ package org.opensearch.index.remote; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.translog.transfer.TranslogTransferMetadata; import org.opensearch.test.OpenSearchTestCase; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.stream.Collectors; + +import static org.opensearch.index.remote.RemoteStoreUtils.verifyNoMultipleWriters; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; +import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; + public class RemoteStoreUtilsTests extends OpenSearchTestCase { + private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 1, + 1, + "node-1" + ); + + private final String metadataFilenameDup = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 2, + 1, + "node-2" + ); + private final String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 13, + 34, + 1, + 1, + "node-1" + ); + + private final String oldMetadataFilename = getOldSegmentMetadataFilename(12, 23, 34, 1, 1); + + /* + Gives segment metadata filename for <2.11 version + */ + public static String getOldSegmentMetadataFilename( + long primaryTerm, + long generation, + long translogGeneration, + long uploadCounter, + int metadataVersion + ) { + return String.join( + SEPARATOR, + METADATA_PREFIX, + RemoteStoreUtils.invertLong(primaryTerm), + RemoteStoreUtils.invertLong(generation), + RemoteStoreUtils.invertLong(translogGeneration), + RemoteStoreUtils.invertLong(uploadCounter), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(metadataVersion) + ); + } + + public static String getOldTranslogMetadataFilename(long primaryTerm, long generation, int metadataVersion) { + return String.join( + METADATA_SEPARATOR, + METADATA_PREFIX, + RemoteStoreUtils.invertLong(primaryTerm), + RemoteStoreUtils.invertLong(generation), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(metadataVersion) + ); + } + public void testInvertToStrInvalid() { assertThrows(IllegalArgumentException.class, () -> RemoteStoreUtils.invertLong(-1)); } @@ -60,4 +135,48 @@ public void testGetSegmentNameUnderscoreDelimiterOverrides() { public void testGetSegmentNameException() { assertThrows(IllegalArgumentException.class, () -> RemoteStoreUtils.getSegmentName("dvd")); } + + public void testVerifyMultipleWriters_Segment() { + List mdFiles = new ArrayList<>(); + mdFiles.add(metadataFilename); + mdFiles.add(metadataFilename2); + mdFiles.add(oldMetadataFilename); + verifyNoMultipleWriters(mdFiles, RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen); + + mdFiles.add(metadataFilenameDup); + assertThrows( + IllegalStateException.class, + () -> verifyNoMultipleWriters(mdFiles, RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen) + ); + } + + public void 
testVerifyMultipleWriters_Translog() throws InterruptedException { + TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2, "node-1"); + String mdFilename = tm.getFileName(); + Thread.sleep(1); + TranslogTransferMetadata tm2 = new TranslogTransferMetadata(1, 1, 1, 2, "node-1"); + String mdFilename2 = tm2.getFileName(); + List bmList = new LinkedList<>(); + bmList.add(new PlainBlobMetadata(mdFilename, 1)); + bmList.add(new PlainBlobMetadata(mdFilename2, 1)); + bmList.add(new PlainBlobMetadata(getOldTranslogMetadataFilename(1, 1, 1), 1)); + RemoteStoreUtils.verifyNoMultipleWriters( + bmList.stream().map(BlobMetadata::name).collect(Collectors.toList()), + TranslogTransferMetadata::getNodeIdByPrimaryTermAndGen + ); + + bmList = new LinkedList<>(); + bmList.add(new PlainBlobMetadata(mdFilename, 1)); + TranslogTransferMetadata tm3 = new TranslogTransferMetadata(1, 1, 1, 2, "node-2"); + bmList.add(new PlainBlobMetadata(tm3.getFileName(), 1)); + List finalBmList = bmList; + assertThrows( + IllegalStateException.class, + () -> RemoteStoreUtils.verifyNoMultipleWriters( + finalBmList.stream().map(BlobMetadata::name).collect(Collectors.toList()), + TranslogTransferMetadata::getNodeIdByPrimaryTermAndGen + ) + ); + } + } diff --git a/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java index afe6e47bec7b2..a45b25f04060b 100644 --- a/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java @@ -133,7 +133,8 @@ public void setupListeners() throws Exception { shardId, createTempDir("translog"), indexSettings, - BigArrays.NON_RECYCLING_INSTANCE + BigArrays.NON_RECYCLING_INSTANCE, + "" ); Engine.EventListener eventListener = new Engine.EventListener() { @Override diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 415efd4ac23b6..5a13f57db2c87 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -46,6 +46,7 @@ import java.util.concurrent.atomic.AtomicLong; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.METADATA_FILES_TO_FETCH; import static org.opensearch.test.RemoteStoreTestUtils.createMetadataFileBytes; import static org.opensearch.test.RemoteStoreTestUtils.getDummyMetadata; import static org.mockito.ArgumentMatchers.any; @@ -138,7 +139,8 @@ public void testRemoteDirectoryInitThrowsException() throws IOException { return Collections.singletonList("dummy string"); } throw new IOException(); - }).when(remoteMetadataDirectory).listFilesByPrefixInLexicographicOrder(MetadataFilenameUtils.METADATA_PREFIX, 1); + }).when(remoteMetadataDirectory) + .listFilesByPrefixInLexicographicOrder(MetadataFilenameUtils.METADATA_PREFIX, METADATA_FILES_TO_FETCH); SegmentInfos segmentInfos; try (Store indexShardStore = indexShard.store()) { @@ -166,7 +168,10 @@ public void testRemoteDirectoryInitThrowsException() throws IOException { // Validate that the stream of metadata file of remoteMetadataDirectory has been opened only once and the // listFilesByPrefixInLexicographicOrder has been called twice. 
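
The writer-conflict check exercised by the tests above reduces to one invariant: a primary-term/generation key may map to only one node id. A minimal standalone sketch of that idea in plain Java (the class name, filename layout, and String[]-based tuple here are illustrative stand-ins, not the OpenSearch API):

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public class VerifyNoMultipleWritersSketch {

    // For each metadata file, fn extracts { primaryTermAndGen, nodeId }, or null for
    // old-format files that carry no node id. Two different node ids mapping to the
    // same primaryTermAndGen key indicate concurrent writers.
    static void verifyNoMultipleWriters(List<String> mdFiles, Function<String, String[]> fn) {
        Map<String, String> nodesByPrimaryTermAndGen = new HashMap<>();
        for (String mdFile : mdFiles) {
            String[] keyAndNode = fn.apply(mdFile);
            if (keyAndNode == null) {
                continue; // pre-node-id file, nothing to verify
            }
            String previousNode = nodesByPrimaryTermAndGen.put(keyAndNode[0], keyAndNode[1]);
            if (previousNode != null && previousNode.equals(keyAndNode[1]) == false) {
                throw new IllegalStateException(
                    "Metadata files for primary term and generation " + keyAndNode[0]
                        + " were written by nodes " + previousNode + " and " + keyAndNode[1]
                );
            }
        }
    }

    public static void main(String[] args) {
        // Illustrative layout: metadata__<pt>__<gen>__<nodeId>__<counter>
        Function<String, String[]> fn = name -> {
            String[] tokens = name.split("__");
            return tokens.length < 5 ? null : new String[] { tokens[1] + "__" + tokens[2], tokens[3] };
        };
        verifyNoMultipleWriters(List.of("metadata__12__23__node-1__1"), fn); // passes
        verifyNoMultipleWriters(
            List.of("metadata__12__23__node-1__1", "metadata__12__23__node-2__2"),
            fn
        ); // throws IllegalStateException
    }
}
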
verify(remoteMetadataDirectory, times(1)).getBlobStream(any()); - verify(remoteMetadataDirectory, times(2)).listFilesByPrefixInLexicographicOrder(MetadataFilenameUtils.METADATA_PREFIX, 1); + verify(remoteMetadataDirectory, times(2)).listFilesByPrefixInLexicographicOrder( + MetadataFilenameUtils.METADATA_PREFIX, + METADATA_FILES_TO_FETCH + ); } public void testAfterRefresh() throws IOException { @@ -579,4 +584,5 @@ private void verifyUploadedSegments(RemoteSegmentStoreDirectory remoteSegmentSto } } } + } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java index d7bbe52aa3905..cad5e47531cc6 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java @@ -35,6 +35,7 @@ import org.mockito.ArgumentCaptor; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.METADATA_FILES_TO_FETCH; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; @@ -78,7 +79,12 @@ public void testNewDirectory() throws IOException { latchedActionListener.onResponse(List.of()); return null; }).when(blobContainer) - .listBlobsByPrefixInSortedOrder(any(), eq(1), eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), any(ActionListener.class)); + .listBlobsByPrefixInSortedOrder( + any(), + eq(METADATA_FILES_TO_FETCH), + eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), + any(ActionListener.class) + ); when(repositoriesService.repository("remote_store_repository")).thenReturn(repository); @@ -93,7 +99,7 @@ public void testNewDirectory() throws IOException { verify(blobContainer).listBlobsByPrefixInSortedOrder( eq(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX), - eq(1), + eq(METADATA_FILES_TO_FETCH), eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), any() ); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 8d99d98fbaaf4..f154dddb0e7cc 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -62,6 +62,7 @@ import org.mockito.Mockito; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.METADATA_FILES_TO_FETCH; import static org.opensearch.test.RemoteStoreTestUtils.createMetadataFileBytes; import static org.opensearch.test.RemoteStoreTestUtils.getDummyMetadata; import static org.hamcrest.CoreMatchers.is; @@ -90,9 +91,39 @@ public class RemoteSegmentStoreDirectoryTests extends IndexShardTestCase { private SegmentInfos segmentInfos; private ThreadPool threadPool; - private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 23, 34, 1, 1); - private final String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 13, 34, 1, 1); - private final String metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(10, 38, 34, 1, 1); + private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 1, + 1, + "node-1" + ); + + private final String 
metadataFilenameDup = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 2, + 1, + "node-2" + ); + private final String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 13, + 34, + 1, + 1, + "node-1" + ); + private final String metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 10, + 38, + 34, + 1, + 1, + "node-1" + ); @Before public void setup() throws IOException { @@ -183,7 +214,7 @@ public void testGetPrimaryTermGenerationUuid() { } public void testInitException() throws IOException { - when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, 1)).thenThrow( + when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, METADATA_FILES_TO_FETCH)).thenThrow( new IOException("Error") ); @@ -202,6 +233,13 @@ public void testInitNoMetadataFile() throws IOException { assertEquals(Set.of(), actualCache.keySet()); } + public void testInitMultipleMetadataFile() throws IOException { + when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, METADATA_FILES_TO_FETCH)).thenReturn( + List.of(metadataFilename, metadataFilenameDup) + ); + assertThrows(IllegalStateException.class, () -> remoteSegmentStoreDirectory.init()); + } + private Map> populateMetadata() throws IOException { List metadataFiles = new ArrayList<>(); @@ -212,7 +250,7 @@ private Map> populateMetadata() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(List.of(metadataFilename)); when( @@ -262,7 +300,7 @@ public void testInit() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(List.of(metadataFilename)); @@ -318,7 +356,7 @@ public void testFileLength() throws IOException { assertEquals(uploadedSegments.get("_0.si").getLength(), remoteSegmentStoreDirectory.fileLength("_0.si")); } - public void testFileLenghtNoSuchFile() throws IOException { + public void testFileLengthNoSuchFile() throws IOException { populateMetadata(); remoteSegmentStoreDirectory.init(); @@ -693,7 +731,7 @@ public void testContainsFile() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -739,7 +777,8 @@ public void testUploadMetadataEmpty() throws IOException { segmentInfos, storeDirectory, 34L, - indexShard.getLatestReplicationCheckpoint() + indexShard.getLatestReplicationCheckpoint(), + "" ) ); } @@ -757,7 +796,7 @@ public void testUploadMetadataNonEmpty() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); Map> metadataFilenameContentMapping = Map.of( @@ -785,7 +824,8 @@ public void testUploadMetadataNonEmpty() throws IOException { segInfos, storeDirectory, generation, - indexShard.getLatestReplicationCheckpoint() + indexShard.getLatestReplicationCheckpoint(), + "" ); 
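
The fixed-width numeric fields in these filenames are what let a plain lexicographic sort return the newest metadata file first. A minimal sketch of that ordering trick, under the assumed semantics that each numeric field is encoded as the zero-padded value of Long.MAX_VALUE minus the input (mirroring, but not reproducing, RemoteStoreUtils.invertLong; the filename layout below is simplified):

import java.util.Locale;

public class MetadataFilenameOrderingSketch {

    // Larger inputs yield lexicographically smaller strings, so newer files sort first.
    static String invertLong(long value) {
        if (value < 0) {
            throw new IllegalArgumentException("invertLong expects a non-negative value: " + value);
        }
        return String.format(Locale.ROOT, "%019d", Long.MAX_VALUE - value);
    }

    // Illustrative layout: metadata__inv(primaryTerm)__inv(generation)__nodeId__version
    static String filename(long primaryTerm, long generation, String nodeId, int version) {
        return String.join(
            "__",
            "metadata",
            invertLong(primaryTerm),
            invertLong(generation),
            nodeId,
            String.valueOf(version)
        );
    }

    public static void main(String[] args) {
        String older = filename(12, 23, "node-1", 1);
        String newer = filename(12, 24, "node-1", 1);
        // The higher generation compares lexicographically smaller, i.e. sorts to the front.
        System.out.println(newer.compareTo(older) < 0); // prints true
    }
}
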
verify(remoteMetadataDirectory).copyFrom( @@ -832,7 +872,8 @@ public void testUploadMetadataMissingSegment() throws IOException { segmentInfos, storeDirectory, 12L, - indexShard.getLatestReplicationCheckpoint() + indexShard.getLatestReplicationCheckpoint(), + "" ) ); verify(indexOutput).close(); @@ -855,7 +896,7 @@ public void testNoMetadataHeaderCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -878,7 +919,7 @@ public void testInvalidCodecHeaderCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -903,7 +944,7 @@ public void testHeaderMinVersionCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -928,7 +969,7 @@ public void testHeaderMaxVersionCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -953,7 +994,7 @@ public void testIncorrectChecksumCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -1114,12 +1155,12 @@ private void indexDocs(int startDocId, int numberOfDocs) throws IOException { } public void testMetadataFileNameOrder() { - String file1 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 21, 23, 1, 1); - String file2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 38, 1, 1); - String file3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(18, 12, 26, 1, 1); - String file4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 10, 1); - String file5 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 1, 1); - String file6 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 5, 1); + String file1 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 21, 23, 1, 1, ""); + String file2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 38, 1, 1, ""); + String file3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(18, 12, 26, 1, 1, ""); + String file4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 10, 1, ""); + String file5 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 1, 1, ""); + String file6 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 5, 1, ""); List actualList = new ArrayList<>(List.of(file1, file2, file3, file4, file5, file6)); actualList.sort(String::compareTo); diff --git a/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java b/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java index 
2de36574064cb..c098d11a3487f 100644 --- a/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java @@ -38,7 +38,7 @@ public void testRecoveryFromTranslog() throws IOException { LocalCheckpointTracker tracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); try { translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -68,7 +68,7 @@ public void testRecoveryFromTranslog() throws IOException { translogManager.syncTranslog(); translogManager.close(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -117,7 +117,7 @@ public void testTranslogRollsGeneration() throws IOException { LocalCheckpointTracker tracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); try { translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -147,7 +147,7 @@ public void testTranslogRollsGeneration() throws IOException { translogManager.syncTranslog(); translogManager.close(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -182,7 +182,7 @@ public void testTrimOperationsFromTranslog() throws IOException { LocalCheckpointTracker tracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); try { translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -214,7 +214,7 @@ public void testTrimOperationsFromTranslog() throws IOException { translogManager.close(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -253,7 +253,7 @@ public void testTranslogSync() throws IOException { ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); AtomicReference translogManagerAtomicReference = new AtomicReference<>(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, 
BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), diff --git a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java index dbfc66d6de4b3..4997067b75198 100644 --- a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java @@ -291,7 +291,7 @@ private TranslogConfig getTranslogConfig(final Path path, final Settings setting ); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings); - return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize); + return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize, ""); } private Location addToTranslogAndList(Translog translog, List list, Translog.Operation op) throws IOException { @@ -1452,7 +1452,8 @@ public void testTranslogWriterCanFlushInAddOrReadCall() throws IOException { temp.getTranslogPath(), temp.getIndexSettings(), temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) + new ByteSizeValue(1, ByteSizeUnit.KB), + "" ); final Set persistedSeqNos = new HashSet<>(); @@ -1550,7 +1551,8 @@ public void testTranslogWriterFsyncedWithLocalTranslog() throws IOException { temp.getTranslogPath(), temp.getIndexSettings(), temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) + new ByteSizeValue(1, ByteSizeUnit.KB), + "" ); final Set persistedSeqNos = new HashSet<>(); diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 233d6f319b797..84506f7ab25ff 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -206,7 +206,7 @@ private TranslogConfig getTranslogConfig(final Path path, final Settings setting ); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings); - return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize); + return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize, ""); } private BlobStoreRepository createRepository() { @@ -1258,7 +1258,8 @@ public void testTranslogWriterCanFlushInAddOrReadCall() throws IOException { temp.getTranslogPath(), temp.getIndexSettings(), temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) + new ByteSizeValue(1, ByteSizeUnit.KB), + "" ); final Set persistedSeqNos = new HashSet<>(); @@ -1360,7 +1361,8 @@ public void testTranslogWriterFsyncDisabledInRemoteFsTranslog() throws IOExcepti temp.getTranslogPath(), temp.getIndexSettings(), temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) + new ByteSizeValue(1, ByteSizeUnit.KB), + "" ); final Set persistedSeqNos = new HashSet<>(); diff --git a/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java b/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java index 43b4d2c9847ab..e17d2770f014a 100644 --- a/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java +++ b/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java @@ 
-74,7 +74,7 @@ protected Translog createTranslog(LongSupplier primaryTermSupplier) throws IOExc } protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSupplier) throws IOException { - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""); String translogUUID = Translog.createEmptyTranslog( translogPath, SequenceNumbers.NO_OPS_PERFORMED, diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index 6fc4557a75675..a48dbdcdacb71 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -16,6 +16,7 @@ import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.collect.Tuple; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; @@ -251,8 +252,8 @@ public void testReadMetadataNoFile() throws IOException { assertNoDownloadStats(false); } - // This should happen most of the time - Just a single metadata file - public void testReadMetadataSingleFile() throws IOException { + // This should happen most of the time - the latest of the listed metadata files is read + public void testReadMetadataFile() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, @@ -260,12 +261,16 @@ public void testReadMetadataSingleFile() throws IOException { null, remoteTranslogTransferTracker ); - TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2); - String mdFilename = tm.getFileName(); + TranslogTransferMetadata metadata1 = new TranslogTransferMetadata(1, 1, 1, 2); + String mdFilename1 = metadata1.getFileName(); + + TranslogTransferMetadata metadata2 = new TranslogTransferMetadata(1, 0, 1, 2); + String mdFilename2 = metadata2.getFileName(); doAnswer(invocation -> { LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); List<BlobMetadata> bmList = new LinkedList<>(); - bmList.add(new PlainBlobMetadata(mdFilename, 1)); + bmList.add(new PlainBlobMetadata(mdFilename1, 1)); + bmList.add(new PlainBlobMetadata(mdFilename2, 1)); latchedActionListener.onResponse(bmList); return null; }).when(transferService) @@ -273,7 +278,7 @@ public void testReadMetadataSingleFile() throws IOException { TranslogTransferMetadata metadata = createTransferSnapshot().getTranslogTransferMetadata(); long delayForMdDownload = 1; - when(transferService.downloadBlob(any(BlobPath.class), eq(mdFilename))).thenAnswer(invocation -> { + when(transferService.downloadBlob(any(BlobPath.class), eq(mdFilename1))).thenAnswer(invocation -> { Thread.sleep(delayForMdDownload); return new ByteArrayInputStream(translogTransferManager.getMetadataBytes(metadata)); }); @@ -496,4 +501,36 @@ private void assertTlogCkpDownloadStats() { // Expect delay for both tlog and ckp file assertTrue(remoteTranslogTransferTracker.getTotalDownloadTimeInMillis() >= 2 * delayForBlobDownload); } + + public void testGetPrimaryTermAndGeneration() { + String tm = new TranslogTransferMetadata(1, 2, 1, 2, "node-1").getFileName(); + assertEquals(new Tuple<>(new Tuple<>(1L, 2L), "node-1"), TranslogTransferMetadata.getNodeIdByPrimaryTermAndGeneration(tm)); + } + + public void testMetadataConflict() throws InterruptedException { + TranslogTransferManager translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath, + null, + remoteTranslogTransferTracker + ); + TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2, "node-1"); + String mdFilename = tm.getFileName(); + Thread.sleep(1); + TranslogTransferMetadata tm2 = new TranslogTransferMetadata(1, 1, 1, 2, "node-2"); + String mdFilename2 = tm2.getFileName(); + + doAnswer(invocation -> { + LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); + List<BlobMetadata> bmList = new LinkedList<>(); + bmList.add(new PlainBlobMetadata(mdFilename, 1)); + bmList.add(new PlainBlobMetadata(mdFilename2, 1)); + latchedActionListener.onResponse(bmList); + return null; + }).when(transferService) + .listAllInSortedOrder(any(BlobPath.class), eq(TranslogTransferMetadata.METADATA_PREFIX), anyInt(), any(ActionListener.class)); + + assertThrows(RuntimeException.class, translogTransferManager::readMetadata); + } } diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index 15f9ee546fe6b..43289a7c89524 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -527,7 +527,7 @@ protected Translog createTranslog(LongSupplier primaryTermSupplier) throws IOExc } protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSupplier) throws IOException { - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""); String translogUUID = Translog.createEmptyTranslog( translogPath, SequenceNumbers.NO_OPS_PERFORMED, @@ -872,7 +872,13 @@ public EngineConfig config( final Engine.EventListener eventListener ) { final IndexWriterConfig iwc = newIndexWriterConfig(); - final TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); + final TranslogConfig translogConfig = new TranslogConfig( + shardId, + translogPath, + indexSettings, + BigArrays.NON_RECYCLING_INSTANCE, + "" + ); final List<ReferenceManager.RefreshListener> extRefreshListenerList = externalRefreshListener == null ?
emptyList() : Collections.singletonList(externalRefreshListener); @@ -939,7 +945,7 @@ protected EngineConfig config( .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .build() ); - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE, ""); return new EngineConfig.Builder().shardId(config.getShardId()) .threadPool(config.getThreadPool()) .indexSettings(indexSettings) diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 4505570b8ebe0..466c00d0648dc 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -697,7 +697,8 @@ protected IndexShard newShard( checkpointPublisher, remoteStore, remoteStoreStatsTrackerFactory, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + "dummy-node" ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); if (remoteStoreStatsTrackerFactory != null) { From 699d235a7edad81be19dabbb57e2933ad4f1b6d6 Mon Sep 17 00:00:00 2001 From: Dharmesh Date: Wed, 4 Oct 2023 18:18:41 +0530 Subject: [PATCH 20/26] [Remote Store] Add support to restrict creation & deletion of system repository and mutation of immutable settings of system repository (#9839) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --------- Signed-off-by: Dharmesh 💤 --- CHANGELOG.md | 3 +- .../repositories/azure/AzureRepository.java | 11 +++ .../azure/AzureRepositorySettingsTests.java | 21 ++++ .../gcs/GoogleCloudStorageRepository.java | 11 +++ .../repositories/s3/S3Repository.java | 11 +++ .../repositories/s3/S3RepositoryTests.java | 16 ++++ ...emoteStoreMockRepositoryIntegTestCase.java | 13 +-- .../remotestore/RemoteIndexRecoveryIT.java | 3 +- .../remotestore/RemoteRestoreSnapshotIT.java | 21 +--- .../RemoteStoreBackpressureIT.java | 2 +- .../RemoteStoreBaseIntegTestCase.java | 15 ++- .../RemoteStoreRefreshListenerIT.java | 2 +- .../RemoteStoreRepositoryRegistrationIT.java | 68 +++++++++++++ .../SegmentReplicationUsingRemoteStoreIT.java | 3 +- ...tReplicationWithRemoteStorePressureIT.java | 3 +- .../multipart/RemoteStoreMultipartIT.java | 10 +- .../repositories/RepositoriesServiceIT.java | 12 +++ .../opensearch/snapshots/CloneSnapshotIT.java | 3 - .../snapshots/SystemRepositoryIT.java | 71 ++++++++++++++ .../put/TransportPutRepositoryAction.java | 2 +- .../metadata/RepositoriesMetadata.java | 8 +- .../remotestore/RemoteStoreNodeService.java | 14 ++- .../repositories/RepositoriesService.java | 96 ++++++++++++++++++- .../opensearch/repositories/Repository.java | 11 +++ .../blobstore/BlobStoreRepository.java | 6 ++ .../repositories/fs/FsRepository.java | 10 ++ .../RepositoriesServiceTests.java | 26 +++-- .../repositories/fs/FsRepositoryTests.java | 28 ++++++ .../java/org/opensearch/test/TestCluster.java | 15 ++- 29 files changed, 454 insertions(+), 61 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a37866012ba1..c847679dafe70 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -90,6 +90,7 @@ The format is based on [Keep a
Changelog](https://keepachangelog.com/en/1.0.0/), - Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) - Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) - Add unreferenced file cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204)) +- [Remote Store] Add support to restrict creation & deletion of system repository and mutation of immutable settings of system repository ([#9839](https://github.com/opensearch-project/OpenSearch/pull/9839)) ### Dependencies - Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) @@ -141,4 +142,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x \ No newline at end of file +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java index 65852c4fc5bd0..381a35bbc11e1 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java @@ -47,6 +47,8 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; +import java.util.ArrayList; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.function.Function; @@ -192,4 +194,13 @@ protected ByteSizeValue chunkSize() { public boolean isReadOnly() { return readonly; } + + @Override + public List<Setting<?>> getRestrictedSystemRepositorySettings() { + List<Setting<?>> restrictedSettings = new ArrayList<>(); + restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings()); + restrictedSettings.add(Repository.BASE_PATH_SETTING); + restrictedSettings.add(Repository.LOCATION_MODE_SETTING); + return restrictedSettings; + } } diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java index c8be30dbaf865..3356e5174592a 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java @@ -34,16 +34,20 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.BlobStoreTestUtil; import org.opensearch.test.OpenSearchTestCase; import
org.junit.AfterClass; +import java.util.List; + import reactor.core.scheduler.Schedulers; import static org.hamcrest.Matchers.is; @@ -179,4 +183,21 @@ public void testChunkSize() { ); } + public void testSystemRepositoryDefault() { + assertThat(azureRepository(Settings.EMPTY).isSystemRepository(), is(false)); + } + + public void testSystemRepositoryOn() { + assertThat(azureRepository(Settings.builder().put("system_repository", true).build()).isSystemRepository(), is(true)); + } + + public void testRestrictedSettingsDefault() { + List> restrictedSettings = azureRepository(Settings.EMPTY).getRestrictedSystemRepositorySettings(); + assertThat(restrictedSettings.size(), is(5)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.READONLY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY)); + assertTrue(restrictedSettings.contains(AzureRepository.Repository.BASE_PATH_SETTING)); + assertTrue(restrictedSettings.contains(AzureRepository.Repository.LOCATION_MODE_SETTING)); + } } diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java index c42cd1802f6e9..13671bc2aa8d6 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -46,6 +46,8 @@ import org.opensearch.repositories.RepositoryException; import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.function.Function; @@ -138,6 +140,15 @@ protected ByteSizeValue chunkSize() { return chunkSize; } + @Override + public List> getRestrictedSystemRepositorySettings() { + List> restrictedSettings = new ArrayList<>(); + restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings()); + restrictedSettings.add(BUCKET); + restrictedSettings.add(BASE_PATH); + return restrictedSettings; + } + /** * Get a given setting from the repository settings, throwing a {@link RepositoryException} if the setting does not exist or is empty. 
*/ diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index 59111e94df22e..8e69fc39f128e 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -62,7 +62,9 @@ import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.threadpool.Scheduler; +import java.util.ArrayList; import java.util.Collection; +import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; @@ -388,6 +390,15 @@ protected ByteSizeValue chunkSize() { return chunkSize; } + @Override + public List> getRestrictedSystemRepositorySettings() { + List> restrictedSettings = new ArrayList<>(); + restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings()); + restrictedSettings.add(BUCKET_SETTING); + restrictedSettings.add(BASE_PATH_SETTING); + return restrictedSettings; + } + @Override protected void doClose() { final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java index 533c3aa17009d..9ea8d98505161 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java @@ -36,17 +36,20 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.repositories.RepositoryException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.BlobStoreTestUtil; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; import java.nio.file.Path; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.containsString; @@ -133,6 +136,19 @@ public void testDefaultBufferSize() { } } + public void testRestrictedSettingsDefault() { + final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY); + try (S3Repository s3repo = createS3Repo(metadata)) { + List> restrictedSettings = s3repo.getRestrictedSystemRepositorySettings(); + assertThat(restrictedSettings.size(), is(5)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.READONLY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY)); + assertTrue(restrictedSettings.contains(S3Repository.BUCKET_SETTING)); + assertTrue(restrictedSettings.contains(S3Repository.BASE_PATH_SETTING)); + } + } + private S3Repository createS3Repo(RepositoryMetadata metadata) { return new S3Repository( metadata, diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java index bc55f6cc2cbcb..9d4d8aa24bd51 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java @@ -33,7 +33,6 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; public abstract class AbstractRemoteStoreMockRepositoryIntegTestCase extends AbstractSnapshotIntegTestCase { @@ -107,11 +106,11 @@ public Settings buildRemoteStoreNodeAttributes(Path repoLocation, double ioFailu .build(); } - protected void deleteRepo() { - logger.info("--> Deleting the repository={}", REPOSITORY_NAME); - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); - logger.info("--> Deleting the repository={}", TRANSLOG_REPOSITORY_NAME); - assertAcked(clusterAdmin().prepareDeleteRepository(TRANSLOG_REPOSITORY_NAME)); + protected void cleanupRepo() { + logger.info("--> Cleanup the repository={}", REPOSITORY_NAME); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).execute().actionGet(); + logger.info("--> Cleanup the repository={}", TRANSLOG_REPOSITORY_NAME); + clusterAdmin().prepareCleanupRepository(TRANSLOG_REPOSITORY_NAME).execute().actionGet(); } protected String setup(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure) { @@ -125,6 +124,8 @@ protected String setup(Path repoLocation, double ioFailureRate, String skipExcep settings.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT); } + disableRepoConsistencyCheck("Remote Store Creates System Repository"); + internalCluster().startClusterManagerOnlyNode(settings.build()); String dataNodeName = internalCluster().startDataOnlyNode(settings.build()); createIndex(INDEX_NAME); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java index 4eb1cc7703735..c957f1b338bfe 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java @@ -23,7 +23,6 @@ import java.nio.file.Path; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteIndexRecoveryIT extends IndexRecoveryIT { @@ -57,7 +56,7 @@ public Settings indexSettings() { @After public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 
4ebccb9b9e551..865b2d13f189e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -57,7 +57,7 @@ public void setup() { @After public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(BASE_REMOTE_REPO)); + clusterAdmin().prepareCleanupRepository(BASE_REMOTE_REPO).get(); } @Override @@ -422,7 +422,7 @@ public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); } - public void testRestoreShallowSnapshotRepositoryOverriden() throws ExecutionException, InterruptedException { + public void testRestoreShallowSnapshotRepository() throws ExecutionException, InterruptedException { String indexName1 = "testindex1"; String snapshotRepoName = "test-restore-snapshot-repo"; String remoteStoreRepoNameUpdated = "test-rs-repo-updated" + TEST_REMOTE_STORE_REPO_SUFFIX; @@ -464,22 +464,7 @@ public void testRestoreShallowSnapshotRepositoryOverriden() throws ExecutionExce assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); - createRepository(BASE_REMOTE_REPO, "fs", absolutePath2); - - RestoreSnapshotResponse restoreSnapshotResponse = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1) - .get(); - - assertTrue(restoreSnapshotResponse.getRestoreInfo().failedShards() > 0); - - ensureRed(restoredIndexName1); - - client().admin().indices().close(Requests.closeIndexRequest(restoredIndexName1)).get(); + client().admin().indices().close(Requests.closeIndexRequest(indexName1)).get(); createRepository(remoteStoreRepoNameUpdated, "fs", remoteRepoPath); RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() .cluster() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java index d02c5bf54fbed..3462054c23630 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java @@ -112,7 +112,7 @@ private void validateBackpressure( stats = stats(); indexDocAndRefresh(initialSource, initialDocsToIndex); assertEquals(rejectionCount, stats.rejectionCount); - deleteRepo(); + cleanupRepo(); } private RemoteSegmentTransferTracker.Stats stats() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 157f8e41fee24..bf536557b1485 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -27,6 +27,7 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; import 
org.opensearch.repositories.fs.FsRepository;
 import org.opensearch.test.OpenSearchIntegTestCase;
@@ -50,7 +51,6 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;

 public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase {
     protected static final String REPOSITORY_NAME = "test-remote-store-repo";
@@ -314,8 +314,8 @@ public void teardown() {
         clusterSettingsSuppliedByTest = false;
         assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_NAME);
         assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_2_NAME);
-        assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME));
-        assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_2_NAME));
+        clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get();
+        clusterAdmin().prepareCleanupRepository(REPOSITORY_2_NAME).get();
     }

     public RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) {
@@ -343,11 +343,18 @@ public void assertRemoteStoreRepositoryOnAllNodes(String repositoryName) {
             .custom(RepositoriesMetadata.TYPE);
         RepositoryMetadata actualRepository = repositories.repository(repositoryName);

+        final RepositoriesService repositoriesService = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class);
+        final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repositoryName);
+
         for (String nodeName : internalCluster().getNodeNames()) {
             ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nodeName);
             DiscoveryNode node = clusterService.localNode();
             RepositoryMetadata expectedRepository = buildRepositoryMetadata(node, repositoryName);
-            assertTrue(actualRepository.equalsIgnoreGenerations(expectedRepository));
+
+            // Validate that all the restricted settings are intact on all the nodes.
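+            // (For blob store repositories these are system_repository, readonly and
+            // remote_store_index_shallow_copy, plus any repository-specific additions.)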
+            repository.getRestrictedSystemRepositorySettings()
+                .stream()
+                .forEach(setting -> assertEquals(setting.get(actualRepository.settings()), setting.get(expectedRepository.settings())));
         }
     }
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java
index b97e93f323fb2..88760b7bbfad2 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java
@@ -56,7 +56,7 @@ public void testRemoteRefreshRetryOnFailure() throws Exception {
             logger.info("Local files = {}, Repo files = {}", sortedFilesInLocal, sortedFilesInRepo);
             assertTrue(filesInRepo.containsAll(filesInLocal));
         }, 90, TimeUnit.SECONDS);
-        deleteRepo();
+        cleanupRepo();
     }

     public void testRemoteRefreshSegmentPressureSettingChanged() {
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java
index 4d56a1e94e3fc..002a149f0c286 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java
@@ -8,6 +8,10 @@

 package org.opensearch.remotestore;

+import org.opensearch.client.Client;
+import org.opensearch.cluster.metadata.RepositoryMetadata;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.test.disruption.NetworkDisruption;
@@ -94,4 +98,68 @@ public void testMultiNodeClusterRandomNodeRecoverNetworkIsolation() {

         internalCluster().clearDisruptionScheme();
     }
+
+    public void testMultiNodeClusterRandomNodeRecoverNetworkIsolationPostNonRestrictedSettingsUpdate() {
+        Set<String> nodesInOneSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new));
+        Set<String> nodesInAnotherSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new));
+        ensureStableCluster(6);
+
+        NetworkDisruption networkDisruption = new NetworkDisruption(
+            new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInAnotherSide),
+            NetworkDisruption.DISCONNECT
+        );
+        internalCluster().setDisruptionScheme(networkDisruption);
+
+        networkDisruption.startDisrupting();
+
+        final Client client = client(nodesInOneSide.iterator().next());
+        RepositoryMetadata repositoryMetadata = client.admin()
+            .cluster()
+            .prepareGetRepositories(REPOSITORY_NAME)
+            .get()
+            .repositories()
+            .get(0);
+        Settings.Builder updatedSettings = Settings.builder().put(repositoryMetadata.settings()).put("chunk_size", new ByteSizeValue(20));
+        updatedSettings.remove("system_repository");
+
+        client.admin()
+            .cluster()
+            .preparePutRepository(repositoryMetadata.name())
+            .setType(repositoryMetadata.type())
+            .setSettings(updatedSettings)
+            .get();
+
+        ensureStableCluster(3, nodesInOneSide.stream().findAny().get());
+        networkDisruption.stopDisrupting();
+
+        ensureStableCluster(6);
+
+        internalCluster().clearDisruptionScheme();
+    }
+
+    public void testNodeRestartPostNonRestrictedSettingsUpdate() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+
internalCluster().startNodes(3); + + final Client client = client(); + RepositoryMetadata repositoryMetadata = client.admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME) + .get() + .repositories() + .get(0); + Settings.Builder updatedSettings = Settings.builder().put(repositoryMetadata.settings()).put("chunk_size", new ByteSizeValue(20)); + updatedSettings.remove("system_repository"); + + client.admin() + .cluster() + .preparePutRepository(repositoryMetadata.name()) + .setType(repositoryMetadata.type()) + .setSettings(updatedSettings) + .get(); + + internalCluster().restartRandomDataNode(); + + ensureStableCluster(4); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java index 45c3ef7f5bae5..23864c35ad154 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java @@ -17,7 +17,6 @@ import java.nio.file.Path; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; /** * This class runs Segment Replication Integ test suite with remote store enabled. @@ -50,6 +49,6 @@ public void setup() { @After public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java index 0da4d81a8871e..6cfc76b7e3223 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java @@ -17,7 +17,6 @@ import java.nio.file.Path; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; /** * This class executes the SegmentReplicationPressureIT suite with remote store integration enabled. 
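The teardown changes in this and the surrounding suites follow from the system-repository rules introduced later in this patch: a system repository can no longer be deleted, so tests release its stale blobs with a cleanup call instead. A condensed sketch of the guard, mirroring the TestCluster.wipeRepositories() change further below (variable names are illustrative):

    // System repositories cannot be deleted; fall back to cleanup for them.
    if (BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.get(repoMetadata.settings())) {
        client().admin().cluster().prepareCleanupRepository(repoName).execute().actionGet();
    } else {
        client().admin().cluster().prepareDeleteRepository(repoName).execute().actionGet();
    }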
@@ -49,6 +48,6 @@ public void setup() { @After public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java index 98fab139f4902..3dfde6f472525 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java @@ -108,8 +108,15 @@ public RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String nam } public void testRateLimitedRemoteUploads() throws Exception { + clusterSettingsSuppliedByTest = true; overrideBuildRepositoryMetadata = true; - internalCluster().startNode(); + Settings.Builder clusterSettings = Settings.builder() + .put(remoteStoreClusterSettings(REPOSITORY_NAME, repositoryLocation, REPOSITORY_2_NAME, repositoryLocation)); + clusterSettings.put( + String.format(Locale.getDefault(), "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, REPOSITORY_NAME), + MockFsRepositoryPlugin.TYPE + ); + internalCluster().startNode(clusterSettings.build()); Client client = client(); logger.info("--> updating repository"); assertAcked( @@ -119,7 +126,6 @@ public void testRateLimitedRemoteUploads() throws Exception { .setType(MockFsRepositoryPlugin.TYPE) .setSettings( Settings.builder() - .put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true) .put("location", repositoryLocation) .put("compress", compress) .put("max_remote_upload_bytes_per_sec", "1kb") diff --git a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java index f149d538cc47a..b8415f4b41815 100644 --- a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java @@ -108,4 +108,16 @@ public void testUpdateRepository() { final Repository updatedRepository = repositoriesService.repository(repositoryName); assertThat(updatedRepository, updated ? 
not(sameInstance(originalRepository)) : sameInstance(originalRepository)); } + + public void testSystemRepositoryCantBeCreated() { + internalCluster(); + final String repositoryName = "test-repo"; + final Client client = client(); + final Settings.Builder repoSettings = Settings.builder().put("system_repository", true).put("location", randomRepoPath()); + + assertThrows( + RepositoryException.class, + () -> client.admin().cluster().preparePutRepository(repositoryName).setType(FsRepository.TYPE).setSettings(repoSettings).get() + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java index 066d82483ae91..83f93ab9ff6b5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java @@ -215,9 +215,6 @@ public void testShallowCloneNameAvailability() throws Exception { final Path shallowSnapshotRepoPath = randomRepoPath(); createRepository(shallowSnapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(shallowSnapshotRepoPath)); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java new file mode 100644 index 0000000000000..f50fc691fb232 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.snapshots; + +import org.opensearch.client.Client; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.repositories.RepositoryException; +import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; + +import java.nio.file.Path; + +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SystemRepositoryIT extends AbstractSnapshotIntegTestCase { + protected Path absolutePath; + final String systemRepoName = "system-repo-name"; + + @Before + public void setup() { + absolutePath = randomRepoPath().toAbsolutePath(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(systemRepoName, absolutePath)) + .build(); + } + + public void testRestrictedSettingsCantBeUpdated() { + disableRepoConsistencyCheck("System repository is being used for the test"); + + internalCluster().startNode(); + final Client client = client(); + final Settings.Builder repoSettings = Settings.builder().put("location", randomRepoPath()); + + RepositoryException e = expectThrows( + RepositoryException.class, + () -> client.admin().cluster().preparePutRepository(systemRepoName).setType("mock").setSettings(repoSettings).get() + ); + assertEquals( + e.getMessage(), + "[system-repo-name] trying to modify an unmodifiable attribute type of system " + + "repository from current value [fs] to new value [mock]" + ); + } + + public void testSystemRepositoryNonRestrictedSettingsCanBeUpdated() { + disableRepoConsistencyCheck("System repository is being used for the test"); + + internalCluster().startNode(); + final Client client = client(); + final Settings.Builder repoSettings = Settings.builder().put("location", absolutePath).put("chunk_size", new ByteSizeValue(20)); + + assertAcked( + client.admin().cluster().preparePutRepository(systemRepoName).setType(FsRepository.TYPE).setSettings(repoSettings).get() + ); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index 644f23d2bafe6..1eadab6b1352e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -100,7 +100,7 @@ protected void clusterManagerOperation( ClusterState state, final ActionListener listener ) { - repositoriesService.registerRepository( + repositoriesService.registerOrUpdateRepository( request, ActionListener.delegateFailure( listener, diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java index 1ab402e1bde4e..a5ef337c3b62a 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java @@ -54,6 +54,8 @@ import java.util.EnumSet; import java.util.List; +import 
static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; + /** * Contains metadata about registered snapshot repositories * @@ -288,8 +290,12 @@ public static void toXContent(RepositoryMetadata repository, XContentBuilder bui if (repository.cryptoMetadata() != null) { repository.cryptoMetadata().toXContent(repository.cryptoMetadata(), builder, params); } + Settings settings = repository.settings(); + if (SYSTEM_REPOSITORY_SETTING.get(settings)) { + settings = repository.settings().filter(s -> !s.equals(SYSTEM_REPOSITORY_SETTING.getKey())); + } builder.startObject("settings"); - repository.settings().toXContent(builder, params); + settings.toXContent(builder, params); builder.endObject(); if (params.paramAsBoolean(HIDE_GENERATIONS_PARAM, false) == false) { diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java index 26c078353d12a..ca2413a057a6b 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java @@ -17,6 +17,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryException; import org.opensearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -133,10 +134,19 @@ public RepositoriesMetadata updateRepositoriesMetadata(DiscoveryNode joiningNode boolean repositoryAlreadyPresent = false; for (RepositoryMetadata existingRepositoryMetadata : existingRepositories.repositories()) { if (newRepositoryMetadata.name().equals(existingRepositoryMetadata.name())) { - if (newRepositoryMetadata.equalsIgnoreGenerations(existingRepositoryMetadata)) { + try { + // This will help in handling two scenarios - + // 1. When a fresh cluster is formed and a node tries to join the cluster, the repository + // metadata constructed from the node attributes of the joining node will be validated + // against the repository information provided by existing nodes in cluster state. + // 2. It's possible to update repository settings except the restricted ones post the + // creation of a system repository and if a node drops we will need to allow it to join + // even if the non-restricted system repository settings are now different. + repositoriesService.get().ensureValidSystemRepositoryUpdate(newRepositoryMetadata, existingRepositoryMetadata); + newRepositoryMetadata = existingRepositoryMetadata; repositoryAlreadyPresent = true; break; - } else { + } catch (RepositoryException e) { throw new IllegalStateException( "new repository metadata [" + newRepositoryMetadata diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java index 3d6679b3ef80e..0361a71116a16 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java @@ -84,6 +84,7 @@ import java.util.stream.Stream; import static org.opensearch.repositories.blobstore.BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; /** * Service responsible for maintaining and providing access to snapshot repositories on nodes. 
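Taken together, the RepositoriesService changes below enforce the behavior exercised by RepositoriesServiceIT and SystemRepositoryIT above. A condensed usage sketch (repository name and setting values are illustrative):

    // 1. Users cannot mark a repository as a system repository themselves: a put-repository
    //    request carrying system_repository=true is rejected with a RepositoryException.
    // 2. Updating a non-restricted setting of an existing system repository is acknowledged:
    client.admin()
        .cluster()
        .preparePutRepository("system-repo-name")
        .setType(FsRepository.TYPE)
        .setSettings(Settings.builder().put("location", absolutePath).put("chunk_size", new ByteSizeValue(20)))
        .get();
    // 3. Changing the type or any setting returned by getRestrictedSystemRepositorySettings()
    //    fails with "trying to modify an unmodifiable attribute ... of system repository".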
@@ -154,7 +155,7 @@ public RepositoriesService(
     }

     /**
-     * Registers new repository in the cluster
+     * Registers new repository or updates an existing repository in the cluster
      * <p>
     * This method can be only called on the cluster-manager node. It tries to create a new repository on the master
     * and if it was successful it adds new repository to cluster metadata.
@@ -162,7 +163,7 @@ public RepositoriesService(
     * @param request  register repository request
     * @param listener register repository listener
     */
-    public void registerRepository(final PutRepositoryRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
+    public void registerOrUpdateRepository(final PutRepositoryRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
         assert lifecycle.started() : "Trying to register new repository but service is in state [" + lifecycle.state() + "]";

         final RepositoryMetadata newRepositoryMetadata = new RepositoryMetadata(
@@ -236,14 +237,30 @@ public ClusterState execute(ClusterState currentState) {
                 List<RepositoryMetadata> repositoriesMetadata = new ArrayList<>(repositories.repositories().size() + 1);

                 for (RepositoryMetadata repositoryMetadata : repositories.repositories()) {
-                    if (repositoryMetadata.name().equals(newRepositoryMetadata.name())) {
-                        if (newRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)) {
+                    RepositoryMetadata updatedRepositoryMetadata = newRepositoryMetadata;
+                    if (isSystemRepositorySettingPresent(repositoryMetadata.settings())) {
+                        Settings updatedSettings = Settings.builder()
+                            .put(newRepositoryMetadata.settings())
+                            .put(SYSTEM_REPOSITORY_SETTING.getKey(), true)
+                            .build();
+                        updatedRepositoryMetadata = new RepositoryMetadata(
+                            newRepositoryMetadata.name(),
+                            newRepositoryMetadata.type(),
+                            updatedSettings,
+                            newRepositoryMetadata.cryptoMetadata()
+                        );
+                    }
+                    if (repositoryMetadata.name().equals(updatedRepositoryMetadata.name())) {
+                        if (updatedRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)) {
                             // Previous version is the same as this one no update is needed.
                             return currentState;
                         }
                         ensureCryptoSettingsAreSame(repositoryMetadata, request);
                         found = true;
-                        repositoriesMetadata.add(newRepositoryMetadata);
+                        if (isSystemRepositorySettingPresent(repositoryMetadata.settings())) {
+                            ensureValidSystemRepositoryUpdate(updatedRepositoryMetadata, repositoryMetadata);
+                        }
+                        repositoriesMetadata.add(updatedRepositoryMetadata);
                     } else {
                         repositoriesMetadata.add(repositoryMetadata);
                     }
@@ -315,6 +332,7 @@ public ClusterState execute(ClusterState currentState) {
             for (RepositoryMetadata repositoryMetadata : repositories.repositories()) {
                 if (Regex.simpleMatch(request.name(), repositoryMetadata.name())) {
                     ensureRepositoryNotInUse(currentState, repositoryMetadata.name());
+                    ensureNotSystemRepository(repositoryMetadata);
                     logger.info("delete repository [{}]", repositoryMetadata.name());
                     changed = true;
                 } else {
@@ -682,6 +700,15 @@ public static void validateRepositoryMetadataSettings(
                     + minVersionInCluster
             );
         }
+        // Validation to not allow users to create system repository via put repository call.
+        if (isSystemRepositorySettingPresent(repositoryMetadataSettings)) {
+            throw new RepositoryException(
+                repositoryName,
+                "setting "
+                    + SYSTEM_REPOSITORY_SETTING.getKey()
+                    + " cannot provide system repository setting; this setting is managed by OpenSearch"
+            );
+        }
     }

     private static void ensureRepositoryNotInUse(ClusterState clusterState, String repository) {
@@ -756,6 +783,65 @@ public void updateRepositoriesMap(Map repos) {
         }
     }

+    private static void ensureNotSystemRepository(RepositoryMetadata repositoryMetadata) {
+        if (isSystemRepositorySettingPresent(repositoryMetadata.settings())) {
+            throw new RepositoryException(repositoryMetadata.name(), "cannot delete a system repository");
+        }
+    }
+
+    private static boolean isSystemRepositorySettingPresent(Settings repositoryMetadataSettings) {
+        return SYSTEM_REPOSITORY_SETTING.get(repositoryMetadataSettings);
+    }
+
+    private static boolean isValueEqual(String key, String newValue, String currentValue) {
+        if (newValue == null && currentValue == null) {
+            return true;
+        }
+        if (newValue == null) {
+            throw new IllegalArgumentException("[" + key + "] cannot be empty, " + "current value [" + currentValue + "]");
+        }
+        if (newValue.equals(currentValue) == false) {
+            throw new IllegalArgumentException(
+                "trying to modify an unmodifiable attribute "
+                    + key
+                    + " of system repository from "
+                    + "current value ["
+                    + currentValue
+                    + "] to new value ["
+                    + newValue
+                    + "]"
+            );
+        }
+        return true;
+    }
+
+    public void ensureValidSystemRepositoryUpdate(RepositoryMetadata newRepositoryMetadata, RepositoryMetadata currentRepositoryMetadata) {
+        if (isSystemRepositorySettingPresent(currentRepositoryMetadata.settings())) {
+            try {
+                isValueEqual("type", newRepositoryMetadata.type(), currentRepositoryMetadata.type());
+
+                Repository repository = repositories.get(currentRepositoryMetadata.name());
+                Settings newRepositoryMetadataSettings = newRepositoryMetadata.settings();
+                Settings currentRepositoryMetadataSettings = currentRepositoryMetadata.settings();
+
+                List<String> restrictedSettings = repository.getRestrictedSystemRepositorySettings()
+                    .stream()
+                    .map(setting -> setting.getKey())
+                    .collect(Collectors.toList());
+
+                for (String restrictedSettingKey : restrictedSettings) {
+                    isValueEqual(
+                        restrictedSettingKey,
+                        newRepositoryMetadataSettings.get(restrictedSettingKey),
+                        currentRepositoryMetadataSettings.get(restrictedSettingKey)
+                    );
+                }
+            } catch (IllegalArgumentException e) {
+                throw new RepositoryException(currentRepositoryMetadata.name(), e.getMessage());
+            }
+        }
+    }
+
     @Override
     protected void doStart() {
diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java
index 10f3dc2b6b340..02ba60cb23c4d 100644
--- a/server/src/main/java/org/opensearch/repositories/Repository.java
+++ b/server/src/main/java/org/opensearch/repositories/Repository.java
@@ -42,6 +42,7 @@ import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.common.Nullable;
 import org.opensearch.common.lifecycle.LifecycleComponent;
+import org.opensearch.common.settings.Setting;
 import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.mapper.MapperService;
@@ -55,6 +56,8 @@

 import java.io.IOException;
 import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import java.util.function.Consumer;
 import java.util.function.Function;
@@ -342,6 +345,14 @@ void restoreShard(
         ActionListener<Void> listener
     );

+    /**
+     * Returns the list of restricted system repository settings that cannot be mutated post repository creation.
+     * @return list of settings
+     */
+    default List<Setting<?>> getRestrictedSystemRepositorySettings() {
+        return Collections.emptyList();
+    }
+
     /**
      * Returns Snapshot Shard Metadata for remote store interop enabled snapshot.
      * <p>
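Concrete repositories layer their own immutable settings on top of this default, as the Azure, GCS, S3 and FS changes in this patch do. A minimal sketch of the override pattern inside a hypothetical BlobStoreRepository subclass (ENDPOINT_SETTING is illustrative; only the pattern itself comes from the diffs in this series):

    // Hypothetical repository-specific setting that must stay fixed once the
    // repository is marked as a system repository.
    public static final Setting<String> ENDPOINT_SETTING = Setting.simpleString("endpoint");

    @Override
    public List<Setting<?>> getRestrictedSystemRepositorySettings() {
        // Inherit the base restrictions (system_repository, readonly,
        // remote_store_index_shallow_copy from BlobStoreRepository)...
        List<Setting<?>> restrictedSettings = new ArrayList<>(super.getRestrictedSystemRepositorySettings());
        // ...and pin the settings that would re-point the repository elsewhere.
        restrictedSettings.add(ENDPOINT_SETTING);
        return restrictedSettings;
    }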
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
index 490ebda24bf60..3481e43cf4c72 100644
--- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
@@ -149,6 +149,7 @@ import java.io.InputStream;
 import java.nio.file.NoSuchFileException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
@@ -3138,6 +3139,11 @@ public InputStream maybeRateLimitSnapshots(InputStream stream) {
         return maybeRateLimit(stream, () -> snapshotRateLimiter, snapshotRateLimitingTimeInNanos, BlobStoreTransferContext.SNAPSHOT);
     }

+    @Override
+    public List<Setting<?>> getRestrictedSystemRepositorySettings() {
+        return Arrays.asList(SYSTEM_REPOSITORY_SETTING, READONLY_SETTING, REMOTE_STORE_INDEX_SHALLOW_COPY);
+    }
+
     @Override
     public RemoteStoreShardShallowCopySnapshot getRemoteStoreShallowCopyShardMetadata(
         SnapshotId snapshotId,
diff --git a/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java b/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java
index 3009466f03635..d432bab93bd71 100644
--- a/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java
+++ b/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java
@@ -50,6 +50,8 @@ import org.opensearch.repositories.blobstore.BlobStoreRepository;

 import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.function.Function;

 /**
@@ -187,4 +189,12 @@ protected ByteSizeValue chunkSize() {
     public BlobPath basePath() {
         return basePath;
     }
+
+    @Override
+    public List<Setting<?>> getRestrictedSystemRepositorySettings() {
+        List<Setting<?>> restrictedSettings = new ArrayList<>();
+        restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings());
+        restrictedSettings.add(LOCATION_SETTING);
+        return restrictedSettings;
+    }
 }
diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
index 889d0dc6ddb14..cc4f4f7b6413d 100644
--- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
+++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
@@ -94,6 +94,7 @@ import java.util.function.Consumer;
 import java.util.function.Function;

+import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING;
 import static org.hamcrest.Matchers.equalTo;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
@@ -201,6 +202,13 @@ public void testRegisterRejectsInvalidRepositoryNames() {
         }
     }

+    public void testUpdateOrRegisterRejectsForSystemRepository() {
+        String repoName = "name";
+        PutRepositoryRequest request = new PutRepositoryRequest(repoName);
+        request.settings(Settings.builder().put(SYSTEM_REPOSITORY_SETTING.getKey(), true).build());
+        expectThrows(RepositoryException.class, () -> repositoriesService.registerOrUpdateRepository(request, null));
+    }
+
     public void testRepositoriesStatsCanHaveTheSameNameAndDifferentTypeOverTime() {
         String repoName = "name";
         expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(repoName));
@@ -310,22 +318,22 @@ public void testRepositoryUpdateWithDifferentCryptoMetadata() {
assertNotNull(repository.cryptoHandler); assertEquals(kpTypeA, repository.cryptoHandler.kpType); - expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerRepository(request, null)); + expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); CryptoSettings cryptoSettings = new CryptoSettings(keyProviderName); cryptoSettings.keyProviderType(kpTypeA); cryptoSettings.settings(Settings.builder().put("key-1", "val-1")); request.cryptoSettings(cryptoSettings); - expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerRepository(request, null)); + expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); cryptoSettings.settings(Settings.builder()); cryptoSettings.keyProviderName("random"); - expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerRepository(request, null)); + expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); cryptoSettings.keyProviderName(keyProviderName); assertEquals(kpTypeA, repository.cryptoHandler.kpType); - repositoriesService.registerRepository(request, null); + repositoriesService.registerOrUpdateRepository(request, null); } public void testCryptoManagerClusterStateChanges() { @@ -355,7 +363,7 @@ public void testCryptoManagerClusterStateChanges() { verified, repositoryMetadata ); - repositoriesService.registerRepository(request, null); + repositoriesService.registerOrUpdateRepository(request, null); MeteredRepositoryTypeA repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); assertNotNull(repository.cryptoHandler); assertEquals(kpTypeA, repository.cryptoHandler.kpType); @@ -375,7 +383,7 @@ public void testCryptoManagerClusterStateChanges() { verified, repositoryMetadata ); - repositoriesService.registerRepository(request, null); + repositoriesService.registerOrUpdateRepository(request, null); repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); assertNotNull(repository.cryptoHandler); @@ -397,7 +405,7 @@ public void testCryptoManagerClusterStateChanges() { verified, repositoryMetadata ); - repositoriesService.registerRepository(request, null); + repositoriesService.registerOrUpdateRepository(request, null); repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); assertNotNull(repository.cryptoHandler); assertEquals(kpTypeA, repository.cryptoHandler.kpType); @@ -418,7 +426,7 @@ public void testCryptoManagerClusterStateChanges() { verified, repositoryMetadata ); - repositoriesService.registerRepository(request, null); + repositoriesService.registerOrUpdateRepository(request, null); repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); assertNotNull(repository.cryptoHandler); assertEquals(kpTypeB, repository.cryptoHandler.kpType); @@ -530,7 +538,7 @@ private ClusterState emptyState() { private void assertThrowsOnRegister(String repoName) { PutRepositoryRequest request = new PutRepositoryRequest(repoName); - expectThrows(RepositoryException.class, () -> repositoriesService.registerRepository(request, null)); + expectThrows(RepositoryException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); } private static class TestCryptoProvider implements CryptoHandler { diff --git a/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java 
b/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java
index 303f60283f69f..d9f599714805b 100644
--- a/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java
+++ b/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java
@@ -58,6 +58,7 @@ import org.opensearch.cluster.routing.UnassignedInfo;
 import org.opensearch.common.lucene.Lucene;
 import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.common.unit.ByteSizeUnit;
 import org.opensearch.core.index.shard.ShardId;
@@ -70,6 +71,7 @@ import org.opensearch.indices.recovery.RecoveryState;
 import org.opensearch.indices.replication.common.ReplicationLuceneIndex;
 import org.opensearch.repositories.IndexId;
+import org.opensearch.repositories.blobstore.BlobStoreRepository;
 import org.opensearch.repositories.blobstore.BlobStoreTestUtil;
 import org.opensearch.snapshots.Snapshot;
 import org.opensearch.snapshots.SnapshotId;
@@ -90,6 +92,7 @@

 import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
+import static org.hamcrest.Matchers.is;

 public class FsRepositoryTests extends OpenSearchTestCase {

@@ -218,6 +221,31 @@ public void testSnapshotAndRestore() throws IOException, InterruptedException {
         }
     }

+    public void testRestrictedSettingsDefault() {
+        Path repo = createTempDir();
+        Settings settings = Settings.builder()
+            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath())
+            .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath())
+            .put("location", repo)
+            .put(FsRepository.BASE_PATH_SETTING.getKey(), "my_base_path")
+            .build();
+        RepositoryMetadata metadata = new RepositoryMetadata("test", "fs", settings);
+        FsRepository repository = new FsRepository(
+            metadata,
+            new Environment(settings, null),
+            NamedXContentRegistry.EMPTY,
+            BlobStoreTestUtil.mockClusterService(),
+            new RecoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))
+        );
+
+        List<Setting<?>> restrictedSettings = repository.getRestrictedSystemRepositorySettings();
+        assertThat(restrictedSettings.size(), is(4));
+        assertTrue(restrictedSettings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING));
+        assertTrue(restrictedSettings.contains(BlobStoreRepository.READONLY_SETTING));
+        assertTrue(restrictedSettings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY));
+        assertTrue(restrictedSettings.contains(FsRepository.LOCATION_SETTING));
+    }
+
     private void runGeneric(ThreadPool threadPool, Runnable runnable) throws InterruptedException {
         CountDownLatch latch = new CountDownLatch(1);
         threadPool.generic().submit(() -> {
diff --git a/test/framework/src/main/java/org/opensearch/test/TestCluster.java b/test/framework/src/main/java/org/opensearch/test/TestCluster.java
index 61742cd4fb827..a9d9abf69ee41 100644
--- a/test/framework/src/main/java/org/opensearch/test/TestCluster.java
+++ b/test/framework/src/main/java/org/opensearch/test/TestCluster.java
@@ -42,10 +42,12 @@ import org.opensearch.client.Client;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.IndexTemplateMetadata;
+import org.opensearch.cluster.metadata.RepositoryMetadata;
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.index.IndexNotFoundException;
 import org.opensearch.indices.IndexTemplateMissingException;
 import
org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.Closeable; @@ -253,7 +255,18 @@ public void wipeRepositories(String... repositories) { } for (String repository : repositories) { try { - client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet(); + List repositoryMetadata = client().admin() + .cluster() + .prepareGetRepositories(repository) + .execute() + .actionGet() + .repositories(); + if (repositoryMetadata.isEmpty() == false + && BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.get(repositoryMetadata.get(0).settings()) == true) { + client().admin().cluster().prepareCleanupRepository(repository).execute().actionGet(); + } else { + client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet(); + } } catch (RepositoryMissingException ex) { // ignore } From 677ff7513598d5622285a31bd1dd7011abd45bf7 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Wed, 4 Oct 2023 18:47:00 +0530 Subject: [PATCH 21/26] Retry on failure to acquire lock on remote metadata (#10341) Signed-off-by: Sachin Kale --- .../snapshots/SnapshotShardsService.java | 24 +++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java index af2f925f89726..1c25d8c71f948 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java @@ -37,6 +37,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.IndexCommit; import org.opensearch.Version; +import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.SnapshotsInProgress; @@ -73,6 +74,7 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.nio.file.NoSuchFileException; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -401,18 +403,32 @@ private void snapshot( try { if (remoteStoreIndexShallowCopy && indexShard.indexSettings().isRemoteStoreEnabled()) { long startTime = threadPool.relativeTimeInMillis(); + long primaryTerm = indexShard.getOperationPrimaryTerm(); // we flush first to make sure we get the latest writes snapshotted wrappedSnapshot = indexShard.acquireLastIndexCommitAndRefresh(true); - long primaryTerm = indexShard.getOperationPrimaryTerm(); - final IndexCommit snapshotIndexCommit = wrappedSnapshot.get(); + IndexCommit snapshotIndexCommit = wrappedSnapshot.get(); long commitGeneration = snapshotIndexCommit.getGeneration(); - indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + try { + indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + } catch (NoSuchFileException e) { + wrappedSnapshot.close(); + logger.warn( + "Exception while acquiring lock on primaryTerm = {} and generation = {}", + primaryTerm, + commitGeneration + ); + indexShard.flush(new FlushRequest(shardId.getIndexName()).force(true)); + wrappedSnapshot = indexShard.acquireLastIndexCommit(false); + snapshotIndexCommit = wrappedSnapshot.get(); + commitGeneration = snapshotIndexCommit.getGeneration(); + 
indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + } try { repository.snapshotRemoteStoreIndexShard( indexShard.store(), snapshot.getSnapshotId(), indexId, - wrappedSnapshot.get(), + snapshotIndexCommit, getShardStateId(indexShard, snapshotIndexCommit), snapshotStatus, primaryTerm, From e5024a834d090703bae97c0d989ad61baf1c4889 Mon Sep 17 00:00:00 2001 From: Ashish Date: Wed, 4 Oct 2023 22:45:47 +0530 Subject: [PATCH 22/26] Enable remote segment upload backpressure by default (#10356) Signed-off-by: Ashish Singh --- CHANGELOG.md | 1 + .../opensearch/index/remote/RemoteStorePressureSettings.java | 2 +- .../index/remote/RemoteStorePressureServiceTests.java | 2 +- .../index/remote/RemoteStorePressureSettingsTests.java | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c847679dafe70..f3a18154b023a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -125,6 +125,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) - Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246)) - Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200)) +- Enable remote segment upload backpressure by default ([#10356](https://github.com/opensearch-project/OpenSearch/pull/10356)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureSettings.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureSettings.java index 864fe24c282a2..e66aa3444c214 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureSettings.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureSettings.java @@ -30,7 +30,7 @@ static class Defaults { public static final Setting REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED = Setting.boolSetting( "remote_store.segment.pressure.enabled", - false, + true, Setting.Property.Dynamic, Setting.Property.NodeScope ); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java index ada8d9983aa3d..de610083f3327 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java @@ -58,7 +58,7 @@ public void tearDown() throws Exception { public void testIsSegmentsUploadBackpressureEnabled() { remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, Settings.EMPTY); pressureService = new RemoteStorePressureService(clusterService, Settings.EMPTY, remoteStoreStatsTrackerFactory); - assertFalse(pressureService.isSegmentsUploadBackpressureEnabled()); + assertTrue(pressureService.isSegmentsUploadBackpressureEnabled()); Settings newSettings = Settings.builder() .put(RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), "true") diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java index f5514b8936a2f..064c6c10eba02 100644 --- 
a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java @@ -48,7 +48,7 @@ public void testGetDefaultSettings() { ); // Check remote refresh segment pressure enabled is false - assertFalse(pressureSettings.isRemoteRefreshSegmentPressureEnabled()); + assertTrue(pressureSettings.isRemoteRefreshSegmentPressureEnabled()); // Check bytes lag variance threshold default value assertEquals(10.0, pressureSettings.getBytesLagVarianceFactor(), 0.0d); From a0cb34442f704309173e127675cc4ae83c7fb197 Mon Sep 17 00:00:00 2001 From: Ashish Date: Thu, 5 Oct 2023 00:34:03 +0530 Subject: [PATCH 23/26] Fix shard failure due to translog tragic close on upload failure (#10363) Signed-off-by: Ashish Singh --- .../index/translog/RemoteFsTranslog.java | 23 +++++-------------- .../index/translog/RemoteFsTranslogTests.java | 23 ++++++++++++++++++- 2 files changed, 28 insertions(+), 18 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 857d90e2e2ac2..29c825fd383c5 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -242,15 +242,10 @@ public static TranslogTransferManager buildTranslogTransferManager( @Override public boolean ensureSynced(Location location) throws IOException { - try { - assert location.generation <= current.getGeneration(); - if (location.generation == current.getGeneration()) { - ensureOpen(); - return prepareAndUpload(primaryTermSupplier.getAsLong(), location.generation); - } - } catch (final Exception ex) { - closeOnTragicEvent(ex); - throw ex; + assert location.generation <= current.getGeneration(); + if (location.generation == current.getGeneration()) { + ensureOpen(); + return prepareAndUpload(primaryTermSupplier.getAsLong(), location.generation); } return false; } @@ -355,14 +350,8 @@ private boolean syncToDisk() throws IOException { @Override public void sync() throws IOException { - try { - if (syncToDisk() || syncNeeded()) { - prepareAndUpload(primaryTermSupplier.getAsLong(), null); - } - } catch (final Exception e) { - tragedy.setTragicException(e); - closeOnTragicEvent(e); - throw e; + if (syncToDisk() || syncNeeded()) { + prepareAndUpload(primaryTermSupplier.getAsLong(), null); } } diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 84506f7ab25ff..b2310010620f7 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -67,6 +67,7 @@ import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; @@ -1049,7 +1050,7 @@ public void testSyncUpTo() throws IOException { } } - public void testSyncUpFailure() throws IOException { + public void testSyncUpLocationFailure() throws IOException { int translogOperations = randomIntBetween(1, 20); int count = 0; fail.failAlways(); @@ -1101,6 +1102,26 @@ public void testSyncUpFailure() throws IOException { assertDownloadStatsNoDownloads(statsTracker); } + public void 
testSyncUpAlwaysFailure() throws IOException { + int translogOperations = randomIntBetween(1, 20); + int count = 0; + fail.failAlways(); + for (int op = 0; op < translogOperations; op++) { + translog.add( + new Translog.Index(String.valueOf(op), count, primaryTerm.get(), Integer.toString(count).getBytes(StandardCharsets.UTF_8)) + ); + try { + translog.sync(); + fail("io exception expected"); + } catch (IOException e) { + assertTrue("at least one operation pending", translog.syncNeeded()); + } + } + assertTrue(translog.isOpen()); + fail.failNever(); + translog.sync(); + } + public void testSyncUpToStream() throws IOException { int iters = randomIntBetween(5, 10); for (int i = 0; i < iters; i++) { From 3ed7cc43f0d7c0cb69b07c4e294b7d43aed4b3a2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Oct 2023 15:25:23 -0400 Subject: [PATCH 24/26] Bump org.xerial.snappy:snappy-java from 1.1.10.4 to 1.1.10.5 in /test/fixtures/hdfs-fixture (#10299) * Bump org.xerial.snappy:snappy-java in /test/fixtures/hdfs-fixture Bumps [org.xerial.snappy:snappy-java](https://github.com/xerial/snappy-java) from 1.1.10.4 to 1.1.10.5. - [Release notes](https://github.com/xerial/snappy-java/releases) - [Commits](https://github.com/xerial/snappy-java/compare/v1.1.10.4...v1.1.10.5) --- updated-dependencies: - dependency-name: org.xerial.snappy:snappy-java dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 4 ++-- test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3a18154b023a..50922b85a0c0d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -104,7 +104,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) - Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752)) - Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126)) -- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.4 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206)) +- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.5 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206), [#10299](https://github.com/opensearch-project/OpenSearch/pull/10299)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.0 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208)) - Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) - Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` @@ -143,4 +143,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x +[Unreleased 2.x]: 
https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x \ No newline at end of file diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 4aad96eddaeb5..7149efe27d694 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -72,5 +72,5 @@ dependencies { exclude group: "com.squareup.okio" } runtimeOnly "com.squareup.okio:okio:3.5.0" - runtimeOnly "org.xerial.snappy:snappy-java:1.1.10.4" + runtimeOnly "org.xerial.snappy:snappy-java:1.1.10.5" } From 28f185b347a3333c8670ca1a7bd7d0a85fed14e9 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Wed, 4 Oct 2023 17:12:05 -0500 Subject: [PATCH 25/26] Refactor multipart download to a more async model (#10349) * Refactor read context streams to async streams Signed-off-by: Kunal Kotwani * Refactor multipart download to a more async model The previous approach of kicking off the stream requests for all parts of a file did not work well for very large files. For example, a 20GiB file uploaded in 16MiB parts will consist of 1200+ parts. When we attempted to initiate streaming for all parts concurrently, some parts would hit a client timeout after 2 minutes without being able to get a connection due to the other parts not having been completed in that time frame. This refactoring adds yet another layer of indirection in order to allow the code that is actually writing the destination file to control the rate at which streams are started. This should allow for downloading files consisting of arbitrarily many parts at any connection speed. This commit also wires in the download rate limiter so that the `indices.recovery.max_bytes_per_sec` is properly honored. Signed-off-by: Andrew Ross --------- Signed-off-by: Kunal Kotwani Signed-off-by: Andrew Ross Co-authored-by: Kunal Kotwani --- .../repositories/s3/S3BlobContainer.java | 28 +--- .../s3/S3BlobStoreContainerTests.java | 4 +- .../opensearch/index/shard/IndexShardIT.java | 3 +- .../mocks/MockFsAsyncBlobContainer.java | 5 +- .../AsyncMultiStreamBlobContainer.java | 16 -- ...syncMultiStreamEncryptedBlobContainer.java | 6 +- .../blobstore/stream/read/ReadContext.java | 32 +++- .../read/listener/FileCompletionListener.java | 47 ------ .../stream/read/listener/FilePartWriter.java | 70 ++------- .../read/listener/ReadContextListener.java | 139 +++++++++++++++--- .../common/settings/ClusterSettings.java | 1 + .../org/opensearch/index/IndexService.java | 4 +- .../opensearch/index/shard/IndexShard.java | 39 +++-- .../opensearch/index/shard/StoreRecovery.java | 11 +- .../index/store/RemoteDirectory.java | 8 +- .../store/RemoteSegmentStoreDirectory.java | 18 ++- .../RemoteSegmentStoreDirectoryFactory.java | 15 +- .../indices/recovery/RecoverySettings.java | 25 ++++ .../RemoteStoreReplicationSource.java | 16 +- .../main/java/org/opensearch/node/Node.java | 6 +- .../blobstore/BlobStoreRepository.java | 6 +- .../org/opensearch/threadpool/ThreadPool.java | 6 + ...ultiStreamEncryptedBlobContainerTests.java | 10 +- .../listener/FileCompletionListenerTests.java | 58 -------- .../read/listener/FilePartWriterTests.java | 104 +------------ .../listener/ReadContextListenerTests.java | 54 +++++-- .../opensearch/index/IndexModuleTests.java | 3 +- .../RemoteStoreRefreshListenerTests.java | 4 +- ...moteSegmentStoreDirectoryFactoryTests.java | 7 +- .../RemoteSegmentStoreDirectoryTests.java | 29 ++-- .../snapshots/SnapshotResiliencyTests.java | 3 +- .../index/shard/IndexShardTestCase.java | 17 ++- 
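The queue-draining approach described in the commit message above can be sketched independently of the classes in this patch. What follows is a minimal, self-contained illustration rather than code from this change; all names in it are hypothetical and error handling is elided. At most maxConcurrent part downloads are in flight at any moment, and finishing one part launches the next:

    import java.util.List;
    import java.util.Queue;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.Executor;
    import java.util.function.Supplier;

    class BoundedPartDownloader {
        private final Queue<Supplier<CompletableFuture<byte[]>>> queue;
        private final Executor executor;

        BoundedPartDownloader(List<Supplier<CompletableFuture<byte[]>>> parts, Executor executor) {
            this.queue = new ConcurrentLinkedQueue<>(parts);
            this.executor = executor;
        }

        void start(int maxConcurrent) {
            // Prime the pump: only maxConcurrent parts are ever in flight at once.
            for (int i = 0; i < Math.min(maxConcurrent, queue.size()); i++) {
                process(queue.poll());
            }
        }

        private void process(Supplier<CompletableFuture<byte[]>> part) {
            if (part == null) {
                return; // queue drained
            }
            part.get().whenCompleteAsync((data, throwable) -> {
                // A real implementation would write `data` at its offset in the
                // destination file here and propagate `throwable` to a listener.
                process(queue.poll()); // completing one part triggers the next
            }, executor);
        }
    }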
.../recovery/DefaultRecoverySettings.java | 24 +++ 33 files changed, 400 insertions(+), 418 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FileCompletionListener.java delete mode 100644 server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FileCompletionListenerTests.java create mode 100644 test/framework/src/main/java/org/opensearch/indices/recovery/DefaultRecoverySettings.java diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index c6ae58371e15c..fcfccf50ad326 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -241,37 +241,23 @@ public void readBlobAsync(String blobName, ActionListener listener) return; } - final List> blobPartInputStreamFutures = new ArrayList<>(); + final List blobPartInputStreamFutures = new ArrayList<>(); final long blobSize = blobMetadata.objectSize(); final Integer numberOfParts = blobMetadata.objectParts() == null ? null : blobMetadata.objectParts().totalPartsCount(); final String blobChecksum = blobMetadata.checksum().checksumCRC32(); if (numberOfParts == null) { - blobPartInputStreamFutures.add(getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobKey, null)); + blobPartInputStreamFutures.add(() -> getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobKey, null)); } else { // S3 multipart files use 1 to n indexing for (int partNumber = 1; partNumber <= numberOfParts; partNumber++) { - blobPartInputStreamFutures.add(getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobKey, partNumber)); + final int innerPartNumber = partNumber; + blobPartInputStreamFutures.add( + () -> getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobKey, innerPartNumber) + ); } } - - CompletableFuture.allOf(blobPartInputStreamFutures.toArray(CompletableFuture[]::new)) - .whenComplete((unused, partThrowable) -> { - if (partThrowable == null) { - listener.onResponse( - new ReadContext( - blobSize, - blobPartInputStreamFutures.stream().map(CompletableFuture::join).collect(Collectors.toList()), - blobChecksum - ) - ); - } else { - Exception ex = partThrowable.getCause() instanceof Exception - ? 
(Exception) partThrowable.getCause() - : new Exception(partThrowable.getCause()); - listener.onFailure(ex); - } - }); + listener.onResponse(new ReadContext(blobSize, blobPartInputStreamFutures, blobChecksum)); }); } catch (Exception ex) { listener.onFailure(SdkException.create("Error occurred while fetching blob parts from the repository", ex)); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index 9817d7cd520ef..2e54705e9cd78 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -969,7 +969,7 @@ public void testReadBlobAsyncMultiPart() throws Exception { assertEquals(objectSize, readContext.getBlobSize()); for (int partNumber = 1; partNumber < objectPartCount; partNumber++) { - InputStreamContainer inputStreamContainer = readContext.getPartStreams().get(partNumber); + InputStreamContainer inputStreamContainer = readContext.getPartStreams().get(partNumber).get().join(); final int offset = partNumber * partSize; assertEquals(partSize, inputStreamContainer.getContentLength()); assertEquals(offset, inputStreamContainer.getOffset()); @@ -1024,7 +1024,7 @@ public void testReadBlobAsyncSinglePart() throws Exception { assertEquals(checksum, readContext.getBlobChecksum()); assertEquals(objectSize, readContext.getBlobSize()); - InputStreamContainer inputStreamContainer = readContext.getPartStreams().stream().findFirst().get(); + InputStreamContainer inputStreamContainer = readContext.getPartStreams().stream().findFirst().get().get().join(); assertEquals(objectSize, inputStreamContainer.getContentLength()); assertEquals(0, inputStreamContainer.getOffset()); assertEquals(objectSize, inputStreamContainer.getInputStream().readAllBytes().length); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 07f85496f13cf..c394a1f631690 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -712,7 +712,8 @@ public static final IndexShard newIndexShard( null, null, () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, - nodeId + nodeId, + null ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java index 079753de95680..36987ac2d4991 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java @@ -27,6 +27,7 @@ import java.nio.file.StandardOpenOption; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; @@ -124,11 +125,11 @@ public void readBlobAsync(String blobName, ActionListener listener) long contentLength = listBlobs().get(blobName).length(); long partSize = contentLength / 10; int numberOfParts = (int) 
((contentLength % partSize) == 0 ? contentLength / partSize : (contentLength / partSize) + 1);
-        List<InputStreamContainer> blobPartStreams = new ArrayList<>();
+        List<ReadContext.StreamPartCreator> blobPartStreams = new ArrayList<>();
         for (int partNumber = 0; partNumber < numberOfParts; partNumber++) {
             long offset = partNumber * partSize;
             InputStreamContainer blobPartStream = new InputStreamContainer(readBlob(blobName, offset, partSize), partSize, offset);
-            blobPartStreams.add(blobPartStream);
+            blobPartStreams.add(() -> CompletableFuture.completedFuture(blobPartStream));
         }
         ReadContext blobReadContext = new ReadContext(contentLength, blobPartStreams, null);
         listener.onResponse(blobReadContext);
diff --git a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java
index e73a9f5cd0bc9..97f304d776f5c 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java
@@ -10,13 +10,10 @@
 
 import org.opensearch.common.annotation.ExperimentalApi;
 import org.opensearch.common.blobstore.stream.read.ReadContext;
-import org.opensearch.common.blobstore.stream.read.listener.ReadContextListener;
 import org.opensearch.common.blobstore.stream.write.WriteContext;
 import org.opensearch.core.action.ActionListener;
-import org.opensearch.threadpool.ThreadPool;
 
 import java.io.IOException;
-import java.nio.file.Path;
 
 /**
  * An extension of {@link BlobContainer} that adds {@link AsyncMultiStreamBlobContainer#asyncBlobUpload} to allow
@@ -45,19 +42,6 @@ public interface AsyncMultiStreamBlobContainer extends BlobContainer {
     @ExperimentalApi
     void readBlobAsync(String blobName, ActionListener<ReadContext> listener);
 
-    /**
-     * Asynchronously downloads the blob to the specified location using an executor from the thread pool.
-     * @param blobName The name of the blob for which needs to be downloaded.
-     * @param fileLocation The path on local disk where the blob needs to be downloaded.
-     * @param threadPool The threadpool instance which will provide the executor for performing a multipart download.
-     * @param completionListener Listener which will be notified when the download is complete.
-     */
-    @ExperimentalApi
-    default void asyncBlobDownload(String blobName, Path fileLocation, ThreadPool threadPool, ActionListener<String> completionListener) {
-        ReadContextListener readContextListener = new ReadContextListener(blobName, fileLocation, threadPool, completionListener);
-        readBlobAsync(blobName, readContextListener);
-    }
-
     /*
      * Whether underlying blobContainer can verify integrity of data after transfer.
If true and if expected
      * checksum is provided in WriteContext, then the checksum of transferred data is compared with expected checksum
diff --git a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java
index c64dc6b9e3ae4..82bc7a0baed50 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java
@@ -144,8 +144,10 @@ public long getBlobSize() {
         }
 
         @Override
-        public List<InputStreamContainer> getPartStreams() {
-            return super.getPartStreams().stream().map(this::decryptInputStreamContainer).collect(Collectors.toList());
+        public List<StreamPartCreator> getPartStreams() {
+            return super.getPartStreams().stream()
+                .map(supplier -> (StreamPartCreator) () -> supplier.get().thenApply(this::decryptInputStreamContainer))
+                .collect(Collectors.toUnmodifiableList());
         }
 
         /**
diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java
index 2c305fb03c475..4bdce11ff4f9a 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java
@@ -12,6 +12,8 @@
 import org.opensearch.common.io.InputStreamContainer;
 
 import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.function.Supplier;
 
 /**
  * ReadContext is used to encapsulate all data needed by BlobContainer#readBlobAsync
@@ -19,18 +21,18 @@
 @ExperimentalApi
 public class ReadContext {
     private final long blobSize;
-    private final List<InputStreamContainer> partStreams;
+    private final List<StreamPartCreator> asyncPartStreams;
     private final String blobChecksum;
 
-    public ReadContext(long blobSize, List<InputStreamContainer> partStreams, String blobChecksum) {
+    public ReadContext(long blobSize, List<StreamPartCreator> asyncPartStreams, String blobChecksum) {
         this.blobSize = blobSize;
-        this.partStreams = partStreams;
+        this.asyncPartStreams = asyncPartStreams;
         this.blobChecksum = blobChecksum;
     }
 
     public ReadContext(ReadContext readContext) {
         this.blobSize = readContext.blobSize;
-        this.partStreams = readContext.partStreams;
+        this.asyncPartStreams = readContext.asyncPartStreams;
         this.blobChecksum = readContext.blobChecksum;
     }
 
@@ -39,14 +41,30 @@ public String getBlobChecksum() {
     }
 
     public int getNumberOfParts() {
-        return partStreams.size();
+        return asyncPartStreams.size();
     }
 
     public long getBlobSize() {
         return blobSize;
     }
 
-    public List<InputStreamContainer> getPartStreams() {
-        return partStreams;
+    public List<StreamPartCreator> getPartStreams() {
+        return asyncPartStreams;
+    }
+
+    /**
+     * Functional interface defining a supplier that kicks off an async action
+     * to fetch a part of an object, represented as an InputStreamContainer.
+     */
+    @FunctionalInterface
+    public interface StreamPartCreator extends Supplier<CompletableFuture<InputStreamContainer>> {
+        /**
+         * Kicks off an async process to start streaming.
+         *
+         * @return When the returned future is completed, streaming has
+         * just begun. Clients must fully consume the resulting stream.
+         */
+        @Override
+        CompletableFuture<InputStreamContainer> get();
+    }
 }
diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FileCompletionListener.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FileCompletionListener.java
deleted file mode 100644
index aadd6e2ab304e..0000000000000
--- a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FileCompletionListener.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.common.blobstore.stream.read.listener;
-
-import org.opensearch.common.annotation.InternalApi;
-import org.opensearch.core.action.ActionListener;
-
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * FileCompletionListener listens for completion of fetch on all the streams for a file, where
- * individual streams are handled using {@link FilePartWriter}. The {@link FilePartWriter}(s)
- * hold a reference to the file completion listener to be notified.
- */
-@InternalApi
-class FileCompletionListener implements ActionListener<Integer> {
-
-    private final int numberOfParts;
-    private final String fileName;
-    private final AtomicInteger completedPartsCount;
-    private final ActionListener<String> completionListener;
-
-    public FileCompletionListener(int numberOfParts, String fileName, ActionListener<String> completionListener) {
-        this.completedPartsCount = new AtomicInteger();
-        this.numberOfParts = numberOfParts;
-        this.fileName = fileName;
-        this.completionListener = completionListener;
-    }
-
-    @Override
-    public void onResponse(Integer unused) {
-        if (completedPartsCount.incrementAndGet() == numberOfParts) {
-            completionListener.onResponse(fileName);
-        }
-    }
-
-    @Override
-    public void onFailure(Exception e) {
-        completionListener.onFailure(e);
-    }
-}
diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriter.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriter.java
index 84fd7ed9ffebf..1a403200249cd 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriter.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriter.java
@@ -8,83 +8,37 @@
 
 package org.opensearch.common.blobstore.stream.read.listener;
 
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
 import org.opensearch.common.annotation.InternalApi;
 import org.opensearch.common.io.Channels;
 import org.opensearch.common.io.InputStreamContainer;
-import org.opensearch.core.action.ActionListener;
 
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.channels.FileChannel;
-import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.StandardOpenOption;
-import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.UnaryOperator;
 
 /**
  * FilePartWriter transfers the provided stream into the specified file path using a {@link FileChannel}
- * instance. It performs offset based writes to the file and notifies the {@link FileCompletionListener} on completion.
+ * instance.
 */
 @InternalApi
-class FilePartWriter implements Runnable {
-
-    private final int partNumber;
-    private final InputStreamContainer blobPartStreamContainer;
-    private final Path fileLocation;
-    private final AtomicBoolean anyPartStreamFailed;
-    private final ActionListener<Integer> fileCompletionListener;
-    private static final Logger logger = LogManager.getLogger(FilePartWriter.class);
-
+class FilePartWriter {
     // 8 MB buffer for transfer
-    private static final int BUFFER_SIZE = 8 * 1024 * 2024;
+    private static final int BUFFER_SIZE = 8 * 1024 * 1024;
 
-    public FilePartWriter(
-        int partNumber,
-        InputStreamContainer blobPartStreamContainer,
-        Path fileLocation,
-        AtomicBoolean anyPartStreamFailed,
-        ActionListener<Integer> fileCompletionListener
-    ) {
-        this.partNumber = partNumber;
-        this.blobPartStreamContainer = blobPartStreamContainer;
-        this.fileLocation = fileLocation;
-        this.anyPartStreamFailed = anyPartStreamFailed;
-        this.fileCompletionListener = fileCompletionListener;
-    }
-
-    @Override
-    public void run() {
-        // Ensures no writes to the file if any stream fails.
-        if (anyPartStreamFailed.get() == false) {
-            try (FileChannel outputFileChannel = FileChannel.open(fileLocation, StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
-                try (InputStream inputStream = blobPartStreamContainer.getInputStream()) {
-                    long streamOffset = blobPartStreamContainer.getOffset();
-                    final byte[] buffer = new byte[BUFFER_SIZE];
-                    int bytesRead;
-                    while ((bytesRead = inputStream.read(buffer)) != -1) {
-                        Channels.writeToChannel(buffer, 0, bytesRead, outputFileChannel, streamOffset);
-                        streamOffset += bytesRead;
-                    }
+    public static void write(Path fileLocation, InputStreamContainer stream, UnaryOperator<InputStream> rateLimiter) throws IOException {
+        try (FileChannel outputFileChannel = FileChannel.open(fileLocation, StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
+            try (InputStream inputStream = rateLimiter.apply(stream.getInputStream())) {
+                long streamOffset = stream.getOffset();
+                final byte[] buffer = new byte[BUFFER_SIZE];
+                int bytesRead;
+                while ((bytesRead = inputStream.read(buffer)) != -1) {
+                    Channels.writeToChannel(buffer, 0, bytesRead, outputFileChannel, streamOffset);
+                    streamOffset += bytesRead;
                 }
-            } catch (IOException e) {
-                processFailure(e);
-                return;
             }
-            fileCompletionListener.onResponse(partNumber);
-        }
-    }
-
-    void processFailure(Exception e) {
-        try {
-            Files.deleteIfExists(fileLocation);
-        } catch (IOException ex) {
-            // Die silently
-            logger.info("Failed to delete file {} on stream failure: {}", fileLocation, ex);
-        }
-        if (anyPartStreamFailed.getAndSet(true) == false) {
-            fileCompletionListener.onFailure(e);
-        }
-    }
 }
diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java
index 4338bddb3fbe7..2914fd0c440fa 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java
@@ -10,51 +10,73 @@
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.opensearch.action.support.GroupedActionListener;
 import org.opensearch.common.annotation.InternalApi;
 import org.opensearch.common.blobstore.stream.read.ReadContext;
 import org.opensearch.core.action.ActionListener;
 import org.opensearch.threadpool.ThreadPool;
 
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
 import java.nio.file.Path;
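To make the contract of the simplified writer concrete, here is a minimal usage sketch, not part of the patch itself. FilePartWriter is package-private and marked @InternalApi, so a sketch like this would only compile from inside its package, and UnaryOperator.identity() stands in for the real download rate limiter:

    package org.opensearch.common.blobstore.stream.read.listener;

    import org.opensearch.common.io.InputStreamContainer;

    import java.io.ByteArrayInputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.function.UnaryOperator;

    class FilePartWriterSketch {
        static void demo() throws Exception {
            Path target = Files.createTempFile("part-demo", ".bin");
            byte[] part = { 1, 2, 3, 4 };
            // Offset 100: the bytes land at position 100 of the target file, which is
            // how writers for different parts of the same blob avoid overwriting each other.
            InputStreamContainer container = new InputStreamContainer(new ByteArrayInputStream(part), part.length, 100);
            FilePartWriter.write(target, container, UnaryOperator.identity());
        }
    }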
+import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.UnaryOperator; /** * ReadContextListener orchestrates the async file fetch from the {@link org.opensearch.common.blobstore.BlobContainer} - * using a {@link ReadContext} callback. On response, it spawns off the download using multiple streams which are - * spread across a {@link ThreadPool} executor. + * using a {@link ReadContext} callback. On response, it spawns off the download using multiple streams. */ @InternalApi public class ReadContextListener implements ActionListener { + private static final Logger logger = LogManager.getLogger(ReadContextListener.class); - private final String fileName; + private final String blobName; private final Path fileLocation; - private final ThreadPool threadPool; private final ActionListener completionListener; - private static final Logger logger = LogManager.getLogger(ReadContextListener.class); + private final ThreadPool threadPool; + private final UnaryOperator rateLimiter; + private final int maxConcurrentStreams; - public ReadContextListener(String fileName, Path fileLocation, ThreadPool threadPool, ActionListener completionListener) { - this.fileName = fileName; + public ReadContextListener( + String blobName, + Path fileLocation, + ActionListener completionListener, + ThreadPool threadPool, + UnaryOperator rateLimiter, + int maxConcurrentStreams + ) { + this.blobName = blobName; this.fileLocation = fileLocation; - this.threadPool = threadPool; this.completionListener = completionListener; + this.threadPool = threadPool; + this.rateLimiter = rateLimiter; + this.maxConcurrentStreams = maxConcurrentStreams; } @Override public void onResponse(ReadContext readContext) { - logger.trace("Streams received for blob {}", fileName); + logger.debug("Received {} parts for blob {}", readContext.getNumberOfParts(), blobName); final int numParts = readContext.getNumberOfParts(); - final AtomicBoolean anyPartStreamFailed = new AtomicBoolean(); - FileCompletionListener fileCompletionListener = new FileCompletionListener(numParts, fileName, completionListener); - - for (int partNumber = 0; partNumber < numParts; partNumber++) { - FilePartWriter filePartWriter = new FilePartWriter( - partNumber, - readContext.getPartStreams().get(partNumber), - fileLocation, - anyPartStreamFailed, - fileCompletionListener - ); - threadPool.executor(ThreadPool.Names.GENERIC).submit(filePartWriter); + final AtomicBoolean anyPartStreamFailed = new AtomicBoolean(false); + final GroupedActionListener groupedListener = new GroupedActionListener<>( + ActionListener.wrap(r -> completionListener.onResponse(blobName), completionListener::onFailure), + numParts + ); + final Queue queue = new ConcurrentLinkedQueue<>(readContext.getPartStreams()); + final StreamPartProcessor processor = new StreamPartProcessor( + queue, + anyPartStreamFailed, + fileLocation, + groupedListener, + threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY), + rateLimiter + ); + for (int i = 0; i < Math.min(maxConcurrentStreams, queue.size()); i++) { + processor.process(queue.poll()); } } @@ -62,4 +84,79 @@ public void onResponse(ReadContext readContext) { public void onFailure(Exception e) { completionListener.onFailure(e); } + + private static class StreamPartProcessor { + private static final RuntimeException CANCELED_PART_EXCEPTION = new RuntimeException( + "Canceled part download due to previous failure" + ); + private 
final Queue queue; + private final AtomicBoolean anyPartStreamFailed; + private final Path fileLocation; + private final GroupedActionListener completionListener; + private final Executor executor; + private final UnaryOperator rateLimiter; + + private StreamPartProcessor( + Queue queue, + AtomicBoolean anyPartStreamFailed, + Path fileLocation, + GroupedActionListener completionListener, + Executor executor, + UnaryOperator rateLimiter + ) { + this.queue = queue; + this.anyPartStreamFailed = anyPartStreamFailed; + this.fileLocation = fileLocation; + this.completionListener = completionListener; + this.executor = executor; + this.rateLimiter = rateLimiter; + } + + private void process(ReadContext.StreamPartCreator supplier) { + if (supplier == null) { + return; + } + supplier.get().whenCompleteAsync((blobPartStreamContainer, throwable) -> { + if (throwable != null) { + processFailure(throwable instanceof Exception ? (Exception) throwable : new RuntimeException(throwable)); + } else if (anyPartStreamFailed.get()) { + processFailure(CANCELED_PART_EXCEPTION); + } else { + try { + FilePartWriter.write(fileLocation, blobPartStreamContainer, rateLimiter); + completionListener.onResponse(fileLocation.toString()); + + // Upon successfully completing a file part, pull another + // file part off the queue to trigger asynchronous processing + process(queue.poll()); + } catch (Exception e) { + processFailure(e); + } + } + }, executor); + } + + private void processFailure(Exception e) { + if (anyPartStreamFailed.getAndSet(true) == false) { + completionListener.onFailure(e); + + // Drain the queue of pending part downloads. These can be discarded + // since they haven't started any work yet, but the listener must be + // notified for each part. + Object item = queue.poll(); + while (item != null) { + completionListener.onFailure(CANCELED_PART_EXCEPTION); + item = queue.poll(); + } + } else { + completionListener.onFailure(e); + } + try { + Files.deleteIfExists(fileLocation); + } catch (IOException ex) { + // Die silently + logger.info("Failed to delete file {} on stream failure: {}", fileLocation, ex); + } + } + } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 5261d40387dc6..4cd3490cffb4c 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -287,6 +287,7 @@ public void apply(Settings value, Settings current, Settings previous) { RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, + RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index fdda8d4ce2497..df8e8070b8e03 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -89,6 +89,7 @@ import 
org.opensearch.index.shard.ShardNotInPrimaryModeException; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.similarity.SimilarityService; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogFactory; @@ -520,7 +521,8 @@ public synchronized IndexShard createShard( remoteStore, remoteStoreStatsTrackerFactory, clusterRemoteTranslogBufferIntervalSupplier, - nodeEnv.nodeId() + nodeEnv.nodeId(), + (RemoteSegmentStoreDirectoryFactory) remoteDirectoryFactory ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 5818b2d866854..4f08411c19b55 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -62,7 +62,6 @@ import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.opensearch.action.admin.indices.upgrade.post.UpgradeRequest; -import org.opensearch.action.support.GroupedActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.replication.PendingReplicationActions; import org.opensearch.action.support.replication.ReplicationResponse; @@ -162,6 +161,7 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.Store; import org.opensearch.index.store.Store.MetadataSnapshot; import org.opensearch.index.store.StoreFileMetadata; @@ -341,6 +341,7 @@ Runnable getGlobalCheckpointSyncer() { private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; private final List internalRefreshListener = new ArrayList<>(); + private final RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory; public IndexShard( final ShardRouting shardRouting, @@ -368,7 +369,11 @@ public IndexShard( @Nullable final Store remoteStore, final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, final Supplier clusterRemoteTranslogBufferIntervalSupplier, - final String nodeId + final String nodeId, + // Wiring a directory factory here breaks some intended abstractions, but this remote directory + // factory is used not as a Lucene directory but instead to copy files from a remote store when + // restoring a shallow snapshot. + @Nullable final RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory ) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -464,6 +469,7 @@ public boolean shouldCache(Query query) { ? 
false : mapperService.documentMapper().mappers().containsTimeStampField(); this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; + this.remoteSegmentStoreDirectoryFactory = remoteSegmentStoreDirectoryFactory; } public ThreadPool getThreadPool() { @@ -2696,7 +2702,7 @@ public void restoreFromRemoteStore(ActionListener listener) { public void restoreFromSnapshotAndRemoteStore( Repository repository, - RepositoriesService repositoriesService, + RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory, ActionListener listener ) { try { @@ -2704,7 +2710,7 @@ public void restoreFromSnapshotAndRemoteStore( assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: " + recoveryState.getRecoverySource(); StoreRecovery storeRecovery = new StoreRecovery(shardId, logger); - storeRecovery.recoverFromSnapshotAndRemoteStore(this, repository, repositoriesService, listener, threadPool); + storeRecovery.recoverFromSnapshotAndRemoteStore(this, repository, remoteSegmentStoreDirectoryFactory, listener); } catch (Exception e) { listener.onFailure(e); } @@ -3544,7 +3550,7 @@ public void startRecovery( "from snapshot and remote store", recoveryState, recoveryListener, - l -> restoreFromSnapshotAndRemoteStore(repositoriesService.repository(repo), repositoriesService, l) + l -> restoreFromSnapshotAndRemoteStore(repositoriesService.repository(repo), remoteSegmentStoreDirectoryFactory, l) ); // indicesService.indexService(shardRouting.shardId().getIndex()).addMetadataListener(); } else { @@ -4921,24 +4927,17 @@ private void downloadSegments( RemoteSegmentStoreDirectory targetRemoteDirectory, Set toDownloadSegments, final Runnable onFileSync - ) { - final PlainActionFuture completionListener = PlainActionFuture.newFuture(); - final GroupedActionListener batchDownloadListener = new GroupedActionListener<>( - ActionListener.map(completionListener, v -> null), - toDownloadSegments.size() - ); - - final ActionListener segmentsDownloadListener = ActionListener.map(batchDownloadListener, fileName -> { + ) throws IOException { + final Path indexPath = store.shardPath() == null ? null : store.shardPath().resolveIndex(); + for (String segment : toDownloadSegments) { + final PlainActionFuture segmentListener = PlainActionFuture.newFuture(); + sourceRemoteDirectory.copyTo(segment, storeDirectory, indexPath, segmentListener); + segmentListener.actionGet(); onFileSync.run(); if (targetRemoteDirectory != null) { - targetRemoteDirectory.copyFrom(storeDirectory, fileName, fileName, IOContext.DEFAULT); + targetRemoteDirectory.copyFrom(storeDirectory, segment, segment, IOContext.DEFAULT); } - return null; - }); - - final Path indexPath = store.shardPath() == null ? 
null : store.shardPath().resolveIndex(); - toDownloadSegments.forEach(file -> { sourceRemoteDirectory.copyTo(file, storeDirectory, indexPath, segmentsDownloadListener); }); - completionListener.actionGet(); + } } private boolean localDirectoryContains(Directory localDirectory, String file, long checksum) { diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index c0211e1257c8e..762aab51469d0 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -70,9 +70,7 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.repositories.IndexId; -import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; -import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.nio.channels.FileChannel; @@ -362,9 +360,8 @@ void recoverFromRepository(final IndexShard indexShard, Repository repository, A void recoverFromSnapshotAndRemoteStore( final IndexShard indexShard, Repository repository, - RepositoriesService repositoriesService, - ActionListener listener, - ThreadPool threadPool + RemoteSegmentStoreDirectoryFactory directoryFactory, + ActionListener listener ) { try { if (canRecover(indexShard)) { @@ -392,10 +389,6 @@ void recoverFromSnapshotAndRemoteStore( remoteStoreRepository = shallowCopyShardMetadata.getRemoteStoreRepository(); } - RemoteSegmentStoreDirectoryFactory directoryFactory = new RemoteSegmentStoreDirectoryFactory( - () -> repositoriesService, - threadPool - ); RemoteSegmentStoreDirectory sourceRemoteDirectory = (RemoteSegmentStoreDirectory) directoryFactory.newDirectory( remoteStoreRepository, indexUUID, diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java index 594b7f99cd85a..eb75c39532d71 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java @@ -62,9 +62,9 @@ public class RemoteDirectory extends Directory { protected final BlobContainer blobContainer; private static final Logger logger = LogManager.getLogger(RemoteDirectory.class); - protected final UnaryOperator uploadRateLimiter; + private final UnaryOperator uploadRateLimiter; - protected final UnaryOperator downloadRateLimiter; + private final UnaryOperator downloadRateLimiter; /** * Number of bytes in the segment file to store checksum @@ -333,6 +333,10 @@ public boolean copyFrom( return false; } + protected UnaryOperator getDownloadRateLimiter() { + return downloadRateLimiter; + } + private void uploadBlob( Directory from, String src, diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 21a84f2b8c903..a97b22360716c 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -25,6 +25,7 @@ import org.apache.lucene.util.Version; import org.opensearch.common.UUIDs; import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; +import org.opensearch.common.blobstore.stream.read.listener.ReadContextListener; import 
org.opensearch.common.collect.Tuple; import org.opensearch.common.io.VersionedCodecStreamWrapper; import org.opensearch.common.logging.Loggers; @@ -37,6 +38,7 @@ import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.threadpool.ThreadPool; @@ -90,6 +92,8 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement private final ThreadPool threadPool; + private final RecoverySettings recoverySettings; + /** * Keeps track of local segment filename to uploaded filename along with other attributes like checksum. * This map acts as a cache layer for uploaded segment filenames which helps avoid calling listAll() each time. @@ -122,13 +126,15 @@ public RemoteSegmentStoreDirectory( RemoteDirectory remoteMetadataDirectory, RemoteStoreLockManager mdLockManager, ThreadPool threadPool, - ShardId shardId + ShardId shardId, + RecoverySettings recoverySettings ) throws IOException { super(remoteDataDirectory); this.remoteDataDirectory = remoteDataDirectory; this.remoteMetadataDirectory = remoteMetadataDirectory; this.mdLockManager = mdLockManager; this.threadPool = threadPool; + this.recoverySettings = recoverySettings; this.logger = Loggers.getLogger(getClass(), shardId); init(); } @@ -488,7 +494,15 @@ public void copyTo(String source, Directory destinationDirectory, Path destinati if (destinationPath != null && remoteDataDirectory.getBlobContainer() instanceof AsyncMultiStreamBlobContainer) { final AsyncMultiStreamBlobContainer blobContainer = (AsyncMultiStreamBlobContainer) remoteDataDirectory.getBlobContainer(); final Path destinationFilePath = destinationPath.resolve(source); - blobContainer.asyncBlobDownload(blobName, destinationFilePath, threadPool, fileCompletionListener); + final ReadContextListener readContextListener = new ReadContextListener( + blobName, + destinationFilePath, + fileCompletionListener, + threadPool, + remoteDataDirectory.getDownloadRateLimiter(), + recoverySettings.getMaxConcurrentRemoteStoreStreams() + ); + blobContainer.readBlobAsync(blobName, readContextListener); } else { // Fallback to older mechanism of downloading the file try { diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index 490b07e441702..cc55380894ecd 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -15,6 +15,7 @@ import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -34,12 +35,18 @@ public class RemoteSegmentStoreDirectoryFactory implements IndexStorePlugin.Dire private static final String SEGMENTS = "segments"; private final Supplier repositoriesService; + private final RecoverySettings recoverySettings; private final ThreadPool 
threadPool;
 
-    public RemoteSegmentStoreDirectoryFactory(Supplier<RepositoriesService> repositoriesService, ThreadPool threadPool) {
+    public RemoteSegmentStoreDirectoryFactory(
+        Supplier<RepositoriesService> repositoriesService,
+        ThreadPool threadPool,
+        RecoverySettings recoverySettings
+    ) {
         this.repositoriesService = repositoriesService;
         this.threadPool = threadPool;
+        this.recoverySettings = recoverySettings;
     }
 
     @Override
@@ -71,13 +78,9 @@ public Directory newDirectory(String repositoryName, String indexUUID, ShardId s
                 String.valueOf(shardId.id())
             );
 
-            return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool, shardId);
+            return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool, shardId, recoverySettings);
         } catch (RepositoryMissingException e) {
             throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", e);
         }
     }
-
-    private RemoteDirectory createRemoteDirectory(BlobStoreRepository repository, BlobPath commonBlobPath, String extension) {
-        return new RemoteDirectory(repository.blobStore().blobContainer(commonBlobPath.add(extension)));
-    }
 }
diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java
index e2346ae078339..ed9755bf824ea 100644
--- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java
+++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java
@@ -84,6 +84,17 @@ public class RecoverySettings {
         Property.NodeScope
     );
 
+    /**
+     * Controls the maximum number of streams that can be started concurrently when downloading from the remote store.
+     */
+    public static final Setting<Integer> INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING = Setting.intSetting(
+        "indices.recovery.max_concurrent_remote_store_streams",
+        20,
+        1,
+        Property.Dynamic,
+        Property.NodeScope
+    );
+
     /**
      * how long to wait before retrying after issues caused by cluster state syncing between nodes
      * i.e., local node is not yet known on remote node, remote shard not yet started etc.
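Because the new setting is declared with Property.Dynamic and Property.NodeScope, it can be adjusted on a live cluster without a restart. A minimal sketch, assuming an already-initialized org.opensearch.client.Client named client and org.opensearch.common.settings.Settings on the classpath (illustrative only, not part of this change):

    // Raise the cap on concurrent remote store download streams at runtime.
    client.admin()
        .cluster()
        .prepareUpdateSettings()
        .setTransientSettings(Settings.builder().put("indices.recovery.max_concurrent_remote_store_streams", 8))
        .get();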
@@ -149,6 +160,7 @@ public class RecoverySettings { private volatile ByteSizeValue maxBytesPerSec; private volatile int maxConcurrentFileChunks; private volatile int maxConcurrentOperations; + private volatile int maxConcurrentRemoteStoreStreams; private volatile SimpleRateLimiter rateLimiter; private volatile TimeValue retryDelayStateSync; private volatile TimeValue retryDelayNetwork; @@ -163,6 +175,7 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this.retryDelayStateSync = INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.get(settings); this.maxConcurrentFileChunks = INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING.get(settings); this.maxConcurrentOperations = INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING.get(settings); + this.maxConcurrentRemoteStoreStreams = INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING.get(settings); // doesn't have to be fast as nodes are reconnected every 10s by default (see InternalClusterService.ReconnectToNodes) // and we want to give the cluster-manager time to remove a faulty node this.retryDelayNetwork = INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.get(settings); @@ -184,6 +197,10 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, this::setMaxConcurrentFileChunks); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, this::setMaxConcurrentOperations); + clusterSettings.addSettingsUpdateConsumer( + INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING, + this::setMaxConcurrentRemoteStoreStreams + ); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, this::setRetryDelayStateSync); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, this::setRetryDelayNetwork); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, this::setInternalActionTimeout); @@ -279,4 +296,12 @@ public int getMaxConcurrentOperations() { private void setMaxConcurrentOperations(int maxConcurrentOperations) { this.maxConcurrentOperations = maxConcurrentOperations; } + + public int getMaxConcurrentRemoteStoreStreams() { + return this.maxConcurrentRemoteStoreStreams; + } + + private void setMaxConcurrentRemoteStoreStreams(int maxConcurrentRemoteStoreStreams) { + this.maxConcurrentRemoteStoreStreams = maxConcurrentRemoteStoreStreams; + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java index aeb690465905f..e17c5293c38ac 100644 --- a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java @@ -14,7 +14,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.util.Version; -import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; @@ -141,14 +141,12 @@ private void downloadSegments( ActionListener 
completionListener ) { final Path indexPath = shardPath == null ? null : shardPath.resolveIndex(); - final GroupedActionListener batchDownloadListener = new GroupedActionListener<>( - ActionListener.map(completionListener, v -> new GetSegmentFilesResponse(toDownloadSegments)), - toDownloadSegments.size() - ); - ActionListener segmentsDownloadListener = ActionListener.map(batchDownloadListener, result -> null); - toDownloadSegments.forEach( - fileMetadata -> remoteStoreDirectory.copyTo(fileMetadata.name(), storeDirectory, indexPath, segmentsDownloadListener) - ); + for (StoreFileMetadata storeFileMetadata : toDownloadSegments) { + final PlainActionFuture segmentListener = PlainActionFuture.newFuture(); + remoteStoreDirectory.copyTo(storeFileMetadata.name(), storeDirectory, indexPath, segmentListener); + segmentListener.actionGet(); + } + completionListener.onResponse(new GetSegmentFilesResponse(toDownloadSegments)); } @Override diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 729b38ca27394..02f6bdd5ad24c 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -757,9 +757,12 @@ protected Node( rerouteServiceReference.set(rerouteService); clusterService.setRerouteService(rerouteService); + final RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); + final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( repositoriesServiceReference::get, - threadPool + threadPool, + recoverySettings ); final SearchRequestStats searchRequestStats = new SearchRequestStats(); @@ -951,7 +954,6 @@ protected Node( transportService.getTaskManager() ); - final RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); RepositoriesModule repositoriesModule = new RepositoriesModule( this.environment, pluginsService.filterPlugins(RepositoryPlugin.class), diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 3481e43cf4c72..41ad357eaeed9 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -1126,7 +1126,8 @@ private void executeStaleShardDelete( // see https://github.com/opensearch-project/OpenSearch/issues/8469 new RemoteSegmentStoreDirectoryFactory( remoteStoreLockManagerFactory.getRepositoriesService(), - threadPool + threadPool, + recoverySettings ).newDirectory( remoteStoreRepoForIndex, indexUUID, @@ -1596,7 +1597,8 @@ private void executeOneStaleIndexDelete( // see https://github.com/opensearch-project/OpenSearch/issues/8469 new RemoteSegmentStoreDirectoryFactory( remoteStoreLockManagerFactory.getRepositoriesService(), - threadPool + threadPool, + recoverySettings ).newDirectory( remoteStoreRepoForIndex, indexUUID, diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 8375ac34972af..ecb5b2cef58ac 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -115,6 +115,7 @@ public static class Names { public static final String TRANSLOG_SYNC = "translog_sync"; public static final String 
REMOTE_PURGE = "remote_purge"; public static final String REMOTE_REFRESH_RETRY = "remote_refresh_retry"; + public static final String REMOTE_RECOVERY = "remote_recovery"; public static final String INDEX_SEARCHER = "index_searcher"; } @@ -184,6 +185,7 @@ public static ThreadPoolType fromType(String type) { map.put(Names.TRANSLOG_SYNC, ThreadPoolType.FIXED); map.put(Names.REMOTE_PURGE, ThreadPoolType.SCALING); map.put(Names.REMOTE_REFRESH_RETRY, ThreadPoolType.SCALING); + map.put(Names.REMOTE_RECOVERY, ThreadPoolType.SCALING); if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { map.put(Names.INDEX_SEARCHER, ThreadPoolType.RESIZABLE); } @@ -269,6 +271,10 @@ public ThreadPool( Names.REMOTE_REFRESH_RETRY, new ScalingExecutorBuilder(Names.REMOTE_REFRESH_RETRY, 1, halfProcMaxAt10, TimeValue.timeValueMinutes(5)) ); + builders.put( + Names.REMOTE_RECOVERY, + new ScalingExecutorBuilder(Names.REMOTE_RECOVERY, 1, halfProcMaxAt10, TimeValue.timeValueMinutes(5)) + ); if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { builders.put( Names.INDEX_SEARCHER, diff --git a/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java b/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java index 947a4f9b1c9ab..1780819390052 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java +++ b/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java @@ -20,6 +20,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.function.UnaryOperator; import org.mockito.Mockito; @@ -51,10 +52,12 @@ public void testReadBlobAsync() throws Exception { // Objects needed for API call final byte[] data = new byte[size]; Randomness.get().nextBytes(data); + final InputStreamContainer inputStreamContainer = new InputStreamContainer(new ByteArrayInputStream(data), data.length, 0); final ListenerTestUtils.CountingCompletionListener completionListener = new ListenerTestUtils.CountingCompletionListener<>(); - final ReadContext readContext = new ReadContext(size, List.of(inputStreamContainer), null); + final CompletableFuture streamContainerFuture = CompletableFuture.completedFuture(inputStreamContainer); + final ReadContext readContext = new ReadContext(size, List.of(() -> streamContainerFuture), null); Mockito.doAnswer(invocation -> { ActionListener readContextActionListener = invocation.getArgument(1); @@ -76,7 +79,7 @@ public void testReadBlobAsync() throws Exception { assertEquals(1, response.getNumberOfParts()); assertEquals(size, response.getBlobSize()); - InputStreamContainer responseContainer = response.getPartStreams().get(0); + InputStreamContainer responseContainer = response.getPartStreams().get(0).get().join(); assertEquals(0, responseContainer.getOffset()); assertEquals(size, responseContainer.getContentLength()); assertEquals(100, responseContainer.getInputStream().available()); @@ -99,7 +102,8 @@ public void testReadBlobAsyncException() throws Exception { final InputStreamContainer inputStreamContainer = new InputStreamContainer(new ByteArrayInputStream(data), data.length, 0); final ListenerTestUtils.CountingCompletionListener completionListener = new ListenerTestUtils.CountingCompletionListener<>(); - final ReadContext readContext = new ReadContext(size, List.of(inputStreamContainer), null); + final 
CompletableFuture streamContainerFuture = CompletableFuture.completedFuture(inputStreamContainer); + final ReadContext readContext = new ReadContext(size, List.of(() -> streamContainerFuture), null); Mockito.doAnswer(invocation -> { ActionListener readContextActionListener = invocation.getArgument(1); diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FileCompletionListenerTests.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FileCompletionListenerTests.java deleted file mode 100644 index fa13d90f42fa6..0000000000000 --- a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FileCompletionListenerTests.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.blobstore.stream.read.listener; - -import org.opensearch.test.OpenSearchTestCase; - -import java.io.IOException; - -import static org.opensearch.common.blobstore.stream.read.listener.ListenerTestUtils.CountingCompletionListener; - -public class FileCompletionListenerTests extends OpenSearchTestCase { - - public void testFileCompletionListener() { - int numStreams = 10; - String fileName = "test_segment_file"; - CountingCompletionListener completionListener = new CountingCompletionListener(); - FileCompletionListener fileCompletionListener = new FileCompletionListener(numStreams, fileName, completionListener); - - for (int stream = 0; stream < numStreams; stream++) { - // Ensure completion listener called only when all streams are completed - assertEquals(0, completionListener.getResponseCount()); - fileCompletionListener.onResponse(null); - } - - assertEquals(1, completionListener.getResponseCount()); - assertEquals(fileName, completionListener.getResponse()); - } - - public void testFileCompletionListenerFailure() { - int numStreams = 10; - String fileName = "test_segment_file"; - CountingCompletionListener completionListener = new CountingCompletionListener(); - FileCompletionListener fileCompletionListener = new FileCompletionListener(numStreams, fileName, completionListener); - - // Fail the listener initially - IOException exception = new IOException(); - fileCompletionListener.onFailure(exception); - - for (int stream = 0; stream < numStreams - 1; stream++) { - assertEquals(0, completionListener.getResponseCount()); - fileCompletionListener.onResponse(null); - } - - assertEquals(1, completionListener.getFailureCount()); - assertEquals(exception, completionListener.getException()); - assertEquals(0, completionListener.getResponseCount()); - - fileCompletionListener.onFailure(exception); - assertEquals(2, completionListener.getFailureCount()); - assertEquals(exception, completionListener.getException()); - } -} diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriterTests.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriterTests.java index 811566eb5767b..f2a758b9bbe10 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriterTests.java +++ b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriterTests.java @@ -13,14 +13,11 @@ import org.junit.Before; import java.io.ByteArrayInputStream; -import java.io.IOException; import java.io.InputStream; import 
java.nio.file.Files; import java.nio.file.Path; import java.util.UUID; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.opensearch.common.blobstore.stream.read.listener.ListenerTestUtils.CountingCompletionListener; +import java.util.function.UnaryOperator; public class FilePartWriterTests extends OpenSearchTestCase { @@ -34,130 +31,37 @@ public void init() throws Exception { public void testFilePartWriter() throws Exception { Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); int contentLength = 100; - int partNumber = 1; InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, inputStream.available(), 0); - AtomicBoolean anyStreamFailed = new AtomicBoolean(); - CountingCompletionListener fileCompletionListener = new CountingCompletionListener<>(); - FilePartWriter filePartWriter = new FilePartWriter( - partNumber, - inputStreamContainer, - segmentFilePath, - anyStreamFailed, - fileCompletionListener - ); - filePartWriter.run(); + FilePartWriter.write(segmentFilePath, inputStreamContainer, UnaryOperator.identity()); assertTrue(Files.exists(segmentFilePath)); assertEquals(contentLength, Files.size(segmentFilePath)); - assertEquals(1, fileCompletionListener.getResponseCount()); - assertEquals(Integer.valueOf(partNumber), fileCompletionListener.getResponse()); } public void testFilePartWriterWithOffset() throws Exception { Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); int contentLength = 100; int offset = 10; - int partNumber = 1; InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, inputStream.available(), offset); - AtomicBoolean anyStreamFailed = new AtomicBoolean(); - CountingCompletionListener fileCompletionListener = new CountingCompletionListener<>(); - FilePartWriter filePartWriter = new FilePartWriter( - partNumber, - inputStreamContainer, - segmentFilePath, - anyStreamFailed, - fileCompletionListener - ); - filePartWriter.run(); + FilePartWriter.write(segmentFilePath, inputStreamContainer, UnaryOperator.identity()); assertTrue(Files.exists(segmentFilePath)); assertEquals(contentLength + offset, Files.size(segmentFilePath)); - assertEquals(1, fileCompletionListener.getResponseCount()); - assertEquals(Integer.valueOf(partNumber), fileCompletionListener.getResponse()); } public void testFilePartWriterLargeInput() throws Exception { Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); int contentLength = 20 * 1024 * 1024; - int partNumber = 1; InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, contentLength, 0); - AtomicBoolean anyStreamFailed = new AtomicBoolean(); - CountingCompletionListener fileCompletionListener = new CountingCompletionListener<>(); - FilePartWriter filePartWriter = new FilePartWriter( - partNumber, - inputStreamContainer, - segmentFilePath, - anyStreamFailed, - fileCompletionListener - ); - filePartWriter.run(); + FilePartWriter.write(segmentFilePath, inputStreamContainer, UnaryOperator.identity()); assertTrue(Files.exists(segmentFilePath)); assertEquals(contentLength, Files.size(segmentFilePath)); - - assertEquals(1, fileCompletionListener.getResponseCount()); - assertEquals(Integer.valueOf(partNumber), 
fileCompletionListener.getResponse()); - } - - public void testFilePartWriterException() throws Exception { - Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); - int contentLength = 100; - int partNumber = 1; - InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); - InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, contentLength, 0); - AtomicBoolean anyStreamFailed = new AtomicBoolean(); - CountingCompletionListener fileCompletionListener = new CountingCompletionListener<>(); - - IOException ioException = new IOException(); - FilePartWriter filePartWriter = new FilePartWriter( - partNumber, - inputStreamContainer, - segmentFilePath, - anyStreamFailed, - fileCompletionListener - ); - assertFalse(anyStreamFailed.get()); - filePartWriter.processFailure(ioException); - - assertTrue(anyStreamFailed.get()); - assertFalse(Files.exists(segmentFilePath)); - - // Fail stream again to simulate another stream failure for same file - filePartWriter.processFailure(ioException); - - assertTrue(anyStreamFailed.get()); - assertFalse(Files.exists(segmentFilePath)); - - assertEquals(0, fileCompletionListener.getResponseCount()); - assertEquals(1, fileCompletionListener.getFailureCount()); - assertEquals(ioException, fileCompletionListener.getException()); - } - - public void testFilePartWriterStreamFailed() throws Exception { - Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); - int contentLength = 100; - int partNumber = 1; - InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); - InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, inputStream.available(), 0); - AtomicBoolean anyStreamFailed = new AtomicBoolean(true); - CountingCompletionListener fileCompletionListener = new CountingCompletionListener<>(); - - FilePartWriter filePartWriter = new FilePartWriter( - partNumber, - inputStreamContainer, - segmentFilePath, - anyStreamFailed, - fileCompletionListener - ); - filePartWriter.run(); - - assertFalse(Files.exists(segmentFilePath)); - assertEquals(0, fileCompletionListener.getResponseCount()); } } diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java index 21b7b47390a9b..7e4c96cbadcda 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java +++ b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java @@ -29,7 +29,9 @@ import java.util.ArrayList; import java.util.List; import java.util.UUID; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; +import java.util.function.UnaryOperator; import static org.opensearch.common.blobstore.stream.read.listener.ListenerTestUtils.CountingCompletionListener; @@ -46,6 +48,7 @@ public class ReadContextListenerTests extends OpenSearchTestCase { private static final int NUMBER_OF_PARTS = 5; private static final int PART_SIZE = 10; private static final String TEST_SEGMENT_FILE = "test_segment_file"; + private static final int MAX_CONCURRENT_STREAMS = 10; @BeforeClass public static void setup() { @@ -64,10 +67,17 @@ public void init() throws Exception { public void testReadContextListener() throws InterruptedException, IOException { Path fileLocation = 
path.resolve(UUID.randomUUID().toString()); - List blobPartStreams = initializeBlobPartStreams(); + List blobPartStreams = initializeBlobPartStreams(); CountDownLatch countDownLatch = new CountDownLatch(1); ActionListener completionListener = new LatchedActionListener<>(new PlainActionFuture<>(), countDownLatch); - ReadContextListener readContextListener = new ReadContextListener(TEST_SEGMENT_FILE, fileLocation, threadPool, completionListener); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + completionListener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); ReadContext readContext = new ReadContext((long) PART_SIZE * NUMBER_OF_PARTS, blobPartStreams, null); readContextListener.onResponse(readContext); @@ -79,10 +89,17 @@ public void testReadContextListener() throws InterruptedException, IOException { public void testReadContextListenerFailure() throws Exception { Path fileLocation = path.resolve(UUID.randomUUID().toString()); - List blobPartStreams = initializeBlobPartStreams(); + List blobPartStreams = initializeBlobPartStreams(); CountDownLatch countDownLatch = new CountDownLatch(1); ActionListener completionListener = new LatchedActionListener<>(new PlainActionFuture<>(), countDownLatch); - ReadContextListener readContextListener = new ReadContextListener(TEST_SEGMENT_FILE, fileLocation, threadPool, completionListener); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + completionListener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); InputStream badInputStream = new InputStream() { @Override @@ -101,7 +118,13 @@ public int available() { } }; - blobPartStreams.add(NUMBER_OF_PARTS, new InputStreamContainer(badInputStream, PART_SIZE, PART_SIZE * NUMBER_OF_PARTS)); + blobPartStreams.add( + NUMBER_OF_PARTS, + () -> CompletableFuture.supplyAsync( + () -> new InputStreamContainer(badInputStream, PART_SIZE, PART_SIZE * NUMBER_OF_PARTS), + threadPool.generic() + ) + ); ReadContext readContext = new ReadContext((long) (PART_SIZE + 1) * NUMBER_OF_PARTS, blobPartStreams, null); readContextListener.onResponse(readContext); @@ -112,18 +135,31 @@ public int available() { public void testReadContextListenerException() { Path fileLocation = path.resolve(UUID.randomUUID().toString()); CountingCompletionListener listener = new CountingCompletionListener(); - ReadContextListener readContextListener = new ReadContextListener(TEST_SEGMENT_FILE, fileLocation, threadPool, listener); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + listener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); IOException exception = new IOException(); readContextListener.onFailure(exception); assertEquals(1, listener.getFailureCount()); assertEquals(exception, listener.getException()); } - private List initializeBlobPartStreams() { - List blobPartStreams = new ArrayList<>(); + private List initializeBlobPartStreams() { + List blobPartStreams = new ArrayList<>(); for (int partNumber = 0; partNumber < NUMBER_OF_PARTS; partNumber++) { InputStream testStream = new ByteArrayInputStream(randomByteArrayOfLength(PART_SIZE)); - blobPartStreams.add(new InputStreamContainer(testStream, PART_SIZE, (long) partNumber * PART_SIZE)); + int finalPartNumber = partNumber; + blobPartStreams.add( + () -> CompletableFuture.supplyAsync( + () -> new InputStreamContainer(testStream, PART_SIZE, (long) 
finalPartNumber * PART_SIZE), + threadPool.generic() + ) + ); } return blobPartStreams; } diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index a1d6be84c9926..bbd73bcf97aab 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -105,6 +105,7 @@ import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.repositories.RepositoriesService; @@ -257,7 +258,7 @@ private IndexService newIndexService(IndexModule module) throws IOException { writableRegistry(), () -> false, null, - new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool, DefaultRecoverySettings.INSTANCE), translogFactorySupplier, () -> IndexSettings.DEFAULT_REFRESH_INTERVAL, () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 5a13f57db2c87..941f2f48e71af 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -33,6 +33,7 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils; import org.opensearch.index.store.Store; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; +import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; @@ -155,7 +156,8 @@ public void testRemoteDirectoryInitThrowsException() throws IOException { remoteMetadataDirectory, mock(RemoteStoreLockManager.class), mock(ThreadPool.class), - shardId + shardId, + DefaultRecoverySettings.INSTANCE ); FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory( new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory) diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java index cad5e47531cc6..78c7fe64cebd9 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java @@ -20,6 +20,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; +import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -57,7 +58,11 @@ public void setup() { 
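        // setup() now constructs the factory with its new recovery-settings dependency,
        // supplied here by the DefaultRecoverySettings.INSTANCE helper that this patch
        // adds to test/framework further below.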
repositoriesService = mock(RepositoriesService.class); threadPool = mock(ThreadPool.class); when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); - remoteSegmentStoreDirectoryFactory = new RemoteSegmentStoreDirectoryFactory(repositoriesServiceSupplier, threadPool); + remoteSegmentStoreDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( + repositoriesServiceSupplier, + threadPool, + DefaultRecoverySettings.INSTANCE + ); } public void testNewDirectory() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index f154dddb0e7cc..b574ccaac55e1 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -25,12 +25,13 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; +import org.opensearch.common.blobstore.stream.read.ReadContext; import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.common.io.InputStreamContainer; import org.opensearch.common.io.VersionedCodecStreamWrapper; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; @@ -41,6 +42,7 @@ import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; +import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; import org.junit.After; @@ -56,9 +58,11 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; +import java.util.function.UnaryOperator; import org.mockito.Mockito; @@ -145,13 +149,16 @@ public void setup() throws IOException { remoteMetadataDirectory, mdLockManager, threadPool, - indexShard.shardId() + indexShard.shardId(), + DefaultRecoverySettings.INSTANCE ); try (Store store = indexShard.store()) { segmentInfos = store.readLastCommittedSegmentsInfo(); } + when(remoteDataDirectory.getDownloadRateLimiter()).thenReturn(UnaryOperator.identity()); when(threadPool.executor(ThreadPool.Names.REMOTE_PURGE)).thenReturn(executorService); + when(threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY)).thenReturn(executorService); } @After @@ -562,9 +569,6 @@ public void onFailure(Exception e) {} } public void testCopyFilesToMultipart() throws Exception { - Settings settings = Settings.builder().build(); - FeatureFlags.initializeFeatureFlags(settings); - String filename = "_0.cfe"; populateMetadata(); remoteSegmentStoreDirectory.init(); @@ -574,13 +578,15 @@ public void testCopyFilesToMultipart() throws Exception { when(remoteDataDirectory.getBlobContainer()).thenReturn(blobContainer); 
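        // copyTo() now routes through readBlobAsync, so the stub below completes the
        // listener with a single-part ReadContext whose InputStreamContainer comes from
        // an already-completed future, replacing the old asyncBlobDownload answer.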
Mockito.doAnswer(invocation -> { - ActionListener completionListener = invocation.getArgument(3); - completionListener.onResponse(invocation.getArgument(0)); + ActionListener completionListener = invocation.getArgument(1); + final CompletableFuture future = new CompletableFuture<>(); + future.complete(new InputStreamContainer(new ByteArrayInputStream(new byte[] { 42 }), 0, 1)); + completionListener.onResponse(new ReadContext(1, List.of(() -> future), "")); return null; - }).when(blobContainer).asyncBlobDownload(any(), any(), any(), any()); + }).when(blobContainer).readBlobAsync(any(), any()); CountDownLatch downloadLatch = new CountDownLatch(1); - ActionListener completionListener = new ActionListener() { + ActionListener completionListener = new ActionListener<>() { @Override public void onResponse(String unused) { downloadLatch.countDown(); @@ -592,7 +598,7 @@ public void onFailure(Exception e) {} Path path = createTempDir(); remoteSegmentStoreDirectory.copyTo(filename, storeDirectory, path, completionListener); assertTrue(downloadLatch.await(5000, TimeUnit.SECONDS)); - verify(blobContainer, times(1)).asyncBlobDownload(contains(filename), eq(path.resolve(filename)), any(), any()); + verify(blobContainer, times(1)).readBlobAsync(contains(filename), any()); verify(storeDirectory, times(0)).copyFrom(any(), any(), any(), any()); } @@ -678,7 +684,8 @@ public void testCopyFilesFromMultipartIOException() throws Exception { remoteMetadataDirectory, mdLockManager, threadPool, - indexShard.shardId() + indexShard.shardId(), + DefaultRecoverySettings.INSTANCE ); populateMetadata(); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 09f5c1bea1a5e..80731b378f369 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -195,6 +195,7 @@ import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.recovery.PeerRecoverySourceService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; @@ -2066,7 +2067,7 @@ public void onFailure(final Exception e) { emptyMap(), null, emptyMap(), - new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool, DefaultRecoverySettings.INSTANCE), repositoriesServiceReference::get, fileCacheCleaner, null, diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 466c00d0648dc..186c1c7e78f6b 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -119,6 +119,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.indices.recovery.AsyncRecoveryTarget; +import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryFailedException; import 
org.opensearch.indices.recovery.RecoveryResponse; @@ -640,7 +641,7 @@ protected IndexShard newShard( Collections.emptyList(), clusterSettings ); - Store remoteStore = null; + Store remoteStore; RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory = null; RepositoriesService mockRepoSvc = mock(RepositoriesService.class); @@ -659,6 +660,8 @@ protected IndexShard newShard( remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, indexSettings.getSettings()); BlobStoreRepository repo = createRepository(remotePath); when(mockRepoSvc.repository(any())).thenAnswer(invocationOnMock -> repo); + } else { + remoteStore = null; } final BiFunction translogFactorySupplier = (settings, shardRouting) -> { @@ -698,7 +701,8 @@ protected IndexShard newShard( remoteStore, remoteStoreStatsTrackerFactory, () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, - "dummy-node" + "dummy-node", + null ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); if (remoteStoreStatsTrackerFactory != null) { @@ -785,7 +789,14 @@ protected RemoteSegmentStoreDirectory createRemoteSegmentStoreDirectory(ShardId RemoteStoreLockManager remoteStoreLockManager = new RemoteStoreMetadataLockManager( new RemoteBufferedOutputDirectory(getBlobContainer(remoteShardPath.resolveIndex())) ); - return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager, threadPool, shardId); + return new RemoteSegmentStoreDirectory( + dataDirectory, + metadataDirectory, + remoteStoreLockManager, + threadPool, + shardId, + DefaultRecoverySettings.INSTANCE + ); } private RemoteDirectory newRemoteDirectory(Path f) throws IOException { diff --git a/test/framework/src/main/java/org/opensearch/indices/recovery/DefaultRecoverySettings.java b/test/framework/src/main/java/org/opensearch/indices/recovery/DefaultRecoverySettings.java new file mode 100644 index 0000000000000..359668f5dad71 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/indices/recovery/DefaultRecoverySettings.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.recovery; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; + +/** + * Utility to provide a {@link RecoverySettings} instance containing all defaults + */ +public final class DefaultRecoverySettings { + private DefaultRecoverySettings() {} + + public static final RecoverySettings INSTANCE = new RecoverySettings( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); +} From de8511b27487ec90a1632f59ad91940374c9c862 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Wed, 4 Oct 2023 18:36:07 -0500 Subject: [PATCH 26/26] Mute DeleteSnapshotIT.testDeleteShallowCopySnapshot (#10372) Signed-off-by: Andrew Ross --- .../java/org/opensearch/snapshots/DeleteSnapshotIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index e79bf1c16b586..31abc16bba50e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -64,6 +64,7 @@ public void testDeleteSnapshot() throws Exception { assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == 0); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9115") public void testDeleteShallowCopySnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); final Path remoteStoreRepoPath = randomRepoPath();
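
The common thread across these test changes is the new lazy part-stream contract: a ReadContext no longer carries materialized InputStreamContainer parts but suppliers of CompletableFuture<InputStreamContainer>, which ReadContextListener drains under a concurrency cap (MAX_CONCURRENT_STREAMS above). A minimal sketch of that pattern, assuming only the ReadContext and InputStreamContainer shapes visible in these hunks; the class and helper names are illustrative, not part of the patch:

    import java.io.ByteArrayInputStream;
    import java.util.List;
    import java.util.concurrent.CompletableFuture;

    import org.opensearch.common.blobstore.stream.read.ReadContext;
    import org.opensearch.common.io.InputStreamContainer;

    class LazyReadContextSketch {
        // Wraps an in-memory payload as a single-part ReadContext, mirroring the
        // updated tests: the part is a supplier, so nothing is consumed until a
        // reader invokes get() on it.
        static ReadContext singlePartContext(byte[] data) {
            InputStreamContainer part = new InputStreamContainer(new ByteArrayInputStream(data), data.length, 0);
            CompletableFuture<InputStreamContainer> future = CompletableFuture.completedFuture(part);
            // Constructor shape taken from the hunks: (blobSize, partStreams, checksum).
            return new ReadContext(data.length, List.of(() -> future), null);
        }
    }

A consumer then resolves a part the same way the assertions above do: readContext.getPartStreams().get(0).get().join().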