cleanup old comments
Signed-off-by: Maciej Mierzwa <[email protected]>
MaciejMierzwa committed Nov 15, 2023
1 parent d7c8358 commit f8f7ea0
Showing 1 changed file with 5 additions and 76 deletions.
@@ -360,7 +360,7 @@ public class SearchOperationTest {
.on(INDICES_ON_WHICH_USER_CAN_PERFORM_INDEX_OPERATIONS_PREFIX.concat("*"))
);

- private static User USER_ALLOWED_TO_CREATE_INDEX = new User("user-allowed-to-create-index").roles(
+ private static final User USER_ALLOWED_TO_CREATE_INDEX = new User("user-allowed-to-create-index").roles(
new Role("create-index-role").indexPermissions("indices:admin/create").on("*")
);

@@ -465,7 +465,7 @@ public void cleanData() throws ExecutionException, InterruptedException {
if (indicesExistsResponse.isExists()) {
DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indexToBeDeleted);
indices.delete(deleteIndexRequest).actionGet();
- Awaitility.await().ignoreExceptions().until(() -> indices.exists(indicesExistsRequest).get().isExists() == false);
+ Awaitility.await().ignoreExceptions().until(() -> !indices.exists(indicesExistsRequest).get().isExists());
}
}

@@ -1118,15 +1118,6 @@ public void shouldUpdateDocumentsInBulk_negative() throws IOException {

@Test
public void shouldDeleteDocumentInBulk_positive() throws IOException {
- /**
- Proof of concept changes to make tests stable. The problem was caused by nodes rotation in this method:
- {@link org.opensearch.client.RestClient#performRequest(Request request)} which uses {@link org.opensearch.client.RestClient#nextNodes()}
- to iterate through nodes, in each http call. This would cause different amount of audit log messages depending on node hit by http request.
- the process is:
- 1. create 5-node cluster, with 3 cluster managers
- 2. create 2-shard, 2-replica index, find out which node holds the data
- 3. create second high level rest client, using node found in previous step, for all calls. Minimize transport requests between cluster nodes.
- */
// create index
Settings sourceIndexSettings = Settings.builder().put("index.number_of_replicas", 2).put("index.number_of_shards", 2).build();
IndexOperationsHelper.createIndex(cluster, WRITE_SONG_INDEX_NAME, sourceIndexSettings);
@@ -1156,7 +1147,6 @@ public void shouldDeleteDocumentInBulk_positive() throws IOException {
}
auditLogsRule.assertExactly(2, userAuthenticated(LIMITED_WRITE_USER).withRestRequest(POST, "/_bulk"));
auditLogsRule.assertExactly(2, grantedPrivilege(LIMITED_WRITE_USER, "BulkRequest"));
- // todo 13l 12l
auditLogsRule.assertExactly(4, auditPredicate(null).withLayer(AuditLog.Origin.TRANSPORT));
auditLogsRule.assertAtLeastTransportMessages(4, grantedPrivilege(LIMITED_WRITE_USER, "PutMappingRequest"));
auditLogsRule.assertAtLeastTransportMessages(4, auditPredicate(INDEX_EVENT).withEffectiveUser(LIMITED_WRITE_USER));
@@ -1181,66 +1171,6 @@ private static void getAndSetPrimaryNode() throws IOException {
}
}

- @Test
- public void shouldDeleteDocumentInBulk_positiveTransportRequests() throws IOException {
- /**
- Proof of concept changes to make tests stable. The problem was caused by nodes rotation in this method:
- {@link org.opensearch.client.RestClient#performRequest(Request request)} which uses {@link org.opensearch.client.RestClient#nextNodes()}
- to iterate through nodes, in each http call. This would cause different amount of audit log messages depending on node hit by http request.
- the process is:
- 1. create 5-node cluster, with 3 cluster managers
- 2. create 2-shard, 2-replica index, find out which node is manager, doesn't hold data
- 3. create second high level rest client, using node found in previous step, for all calls. Maximize transport requests between cluster nodes.
- */
- // create index
- Settings sourceIndexSettings = Settings.builder().put("index.number_of_replicas", 2).put("index.number_of_shards", 2).build();
- IndexOperationsHelper.createIndex(cluster, WRITE_SONG_INDEX_NAME, sourceIndexSettings);
- try (RestHighLevelClient restHighLevelClient = cluster.getRestHighLevelClient(ADMIN_USER)) {
- // cat shards, get index and node name
- // getNodeByName()
- // get rest client of node
- Request getIndicesRequest = new Request("GET", "/_cat/nodes?v");
- // High level client doesn't support _cat/shards API
- Response getIndicesResponse = restHighLevelClient.getLowLevelClient().performRequest(getIndicesRequest);
- String primaryNode = new BufferedReader(new InputStreamReader(getIndicesResponse.getEntity().getContent())).lines()
- .map(s -> s.split("\\s+"))
- .filter(strings -> strings[9].equals("*"))
- .map(strings -> strings[10])
- .findAny()
- .orElseThrow(() -> new IllegalStateException("no cluster manager found"));
- cluster.setPrimaryNode(primaryNode);
- }
-
- try (RestHighLevelClient restHighLevelClient = cluster.getRestHighLevelClient(LIMITED_WRITE_USER)) {
-
- BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(IMMEDIATE);
- bulkRequest.add(new IndexRequest(WRITE_SONG_INDEX_NAME).id("one").source(SONGS[0].asMap()));
- bulkRequest.add(new IndexRequest(WRITE_SONG_INDEX_NAME).id("two").source(SONGS[1].asMap()));
- bulkRequest.add(new IndexRequest(WRITE_SONG_INDEX_NAME).id("three").source(SONGS[2].asMap()));
- bulkRequest.add(new IndexRequest(WRITE_SONG_INDEX_NAME).id("four").source(SONGS[3].asMap()));
- assertThat(restHighLevelClient.bulk(bulkRequest, DEFAULT), successBulkResponse());
- bulkRequest = new BulkRequest().setRefreshPolicy(IMMEDIATE);
- bulkRequest.add(new DeleteRequest(WRITE_SONG_INDEX_NAME, "one"));
- bulkRequest.add(new DeleteRequest(WRITE_SONG_INDEX_NAME, "three"));
-
- BulkResponse response = restHighLevelClient.bulk(bulkRequest, DEFAULT);
-
- assertThat(response, successBulkResponse());
- assertThat(internalClient, not(clusterContainsDocument(WRITE_SONG_INDEX_NAME, "one")));
- assertThat(internalClient, not(clusterContainsDocument(WRITE_SONG_INDEX_NAME, "three")));
- assertThat(
- internalClient,
- clusterContainsDocumentWithFieldValue(WRITE_SONG_INDEX_NAME, "two", FIELD_TITLE, TITLE_SONG_1_PLUS_1)
- );
- assertThat(internalClient, clusterContainsDocumentWithFieldValue(WRITE_SONG_INDEX_NAME, "four", FIELD_TITLE, TITLE_POISON));
- }
- auditLogsRule.assertExactly(2, userAuthenticated(LIMITED_WRITE_USER).withRestRequest(POST, "/_bulk"));
- auditLogsRule.assertExactly(2, grantedPrivilege(LIMITED_WRITE_USER, "BulkRequest"));
- auditLogsRule.assertExactly(5, auditPredicate(null).withLayer(AuditLog.Origin.TRANSPORT));
- auditLogsRule.assertAtLeastTransportMessages(4, grantedPrivilege(LIMITED_WRITE_USER, "PutMappingRequest"));
- auditLogsRule.assertAtLeastTransportMessages(4, auditPredicate(INDEX_EVENT).withEffectiveUser(LIMITED_WRITE_USER));
- }
-
@Test
public void shouldDeleteDocumentInBulk_partiallyPositive() throws IOException {
Settings indexSettings = Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", 1).build();
@@ -1590,7 +1520,7 @@ public void shouldUpdateTemplate_positive() throws IOException {
restHighLevelClient.indices().putTemplate(request, DEFAULT);
assertThat(internalClient, clusterContainTemplate(MUSICAL_INDEX_TEMPLATE));
request = new PutIndexTemplateRequest(MUSICAL_INDEX_TEMPLATE).patterns(List.of(TEMPLATE_INDEX_PREFIX))
- .alias(new Alias(ALIAS_USED_IN_MUSICAL_INDEX_TEMPLATE_0003));// TODO add healthcheck for template await
+ .alias(new Alias(ALIAS_USED_IN_MUSICAL_INDEX_TEMPLATE_0003));

var response = restHighLevelClient.indices().putTemplate(request, DEFAULT);

@@ -1599,8 +1529,7 @@ public void shouldUpdateTemplate_positive() throws IOException {
String documentId = "000one";
IndexRequest indexRequest = new IndexRequest(INDEX_NAME_SONG_TRANSCRIPTION_JAZZ).id(documentId)
.source(SONGS[0].asMap())
- .setRefreshPolicy(IMMEDIATE);// TODO maybe?? add healthcheck for index creation, might not be required because of IMMEDIATE
- // flag
+ .setRefreshPolicy(IMMEDIATE);
restHighLevelClient.index(indexRequest, DEFAULT);
assertThat(internalClient, clusterContainTemplate(MUSICAL_INDEX_TEMPLATE));
assertThat(internalClient, clusterContainsDocument(ALIAS_USED_IN_MUSICAL_INDEX_TEMPLATE_0003, documentId));
@@ -1758,7 +1687,7 @@ public void shouldDeleteSnapshotRepository_negative() throws IOException {
auditLogsRule.assertExactlyOne(missingPrivilege(LIMITED_READ_USER, "DeleteRepositoryRequest"));
}

- @Test // Bug which can be reproduced with the below test: https://github.com/opensearch-project/security/issues/2169
+ @Test
public void shouldCreateSnapshot_positive() throws IOException {
final String snapshotName = "snapshot-positive-test";
long snapshotGetCount;