Skip to content

Commit

Permalink
[Integ-tests] Improve ARNs in integration tests to be compatible with…
Browse files Browse the repository at this point in the history
… other AWS partitions

AD test, Slurm accounting, and Trainium tests are not changed, because we do not need to run them in other partitions for now

Signed-off-by: Hanwen <[email protected]>
  • Loading branch information
hanwen-cluster committed Mar 3, 2023
1 parent c7c1751 commit 19f4b8c
Show file tree
Hide file tree
Showing 12 changed files with 26 additions and 18 deletions.
2 changes: 2 additions & 0 deletions tests/integration-tests/clusters_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
ClusterCreationError,
dict_add_nested_key,
get_cfn_events,
get_arn_partition,
get_stack_id_tag_filter,
kebab_case,
retrieve_cfn_outputs,
Expand Down Expand Up @@ -51,6 +52,7 @@ def __init__(self, name, ssh_key, config_file, region, custom_cli_credentials=No
self.config_file = config_file
self.ssh_key = ssh_key
self.region = region
self.partition = get_arn_partition(region)
with open(config_file, encoding="utf-8") as conf_file:
self.config = yaml.safe_load(conf_file)
self.has_been_deleted = False
Expand Down
1 change: 1 addition & 0 deletions tests/integration-tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -774,6 +774,7 @@ def _get_default_template_values(vpc_stack: CfnVpcStack, request):
default_values["private_subnet_id"] = vpc_stack.get_private_subnet()
default_values["private_subnet_ids"] = vpc_stack.get_all_private_subnets()
default_values.update({dimension: request.node.funcargs.get(dimension) for dimension in DIMENSIONS_MARKER_ARGS})
default_values["partition"] = get_arn_partition(default_values["region"])
default_values["key_name"] = request.config.getoption("key_name")

if default_values.get("scheduler") in request.config.getoption("tests_config", default={}).get(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -310,13 +310,13 @@ def _test_pcluster_export_cluster_logs(s3_bucket_factory, cluster):
{
"Action": "s3:GetBucketAcl",
"Effect": "Allow",
"Resource": f"arn:aws:s3:::{bucket_name}",
"Resource": f"arn:{cluster.partition}:s3:::{bucket_name}",
"Principal": {"Service": f"logs.{cluster.region}.amazonaws.com"},
},
{
"Action": "s3:PutObject",
"Effect": "Allow",
"Resource": f"arn:aws:s3:::{bucket_name}/*",
"Resource": f"arn:{cluster.partition}:s3:::{bucket_name}/*",
"Condition": {"StringEquals": {"s3:x-amz-acl": "bucket-owner-full-control"}},
"Principal": {"Service": f"logs.{cluster.region}.amazonaws.com"},
},
Expand Down
5 changes: 3 additions & 2 deletions tests/integration-tests/tests/createami/test_createami.py
Original file line number Diff line number Diff line change
Expand Up @@ -377,9 +377,10 @@ def _custom_resource(image_id):
custom_resource_template.set_description("Create build image custom resource stack")

    # Create an instance role
partition = get_arn_partition(region)
managed_policy_arns = [
"arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore",
"arn:aws:iam::aws:policy/EC2InstanceProfileForImageBuilder",
f"arn:{partition}:iam::aws:policy/AmazonSSMManagedInstanceCore",
f"arn:{partition}:iam::aws:policy/EC2InstanceProfileForImageBuilder",
]

policy_document = iam.Policy(
Expand Down
16 changes: 9 additions & 7 deletions tests/integration-tests/tests/iam/test_iam.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
from s3_common_utils import check_s3_read_resource, check_s3_read_write_resource, get_policy_resources
from troposphere.iam import ManagedPolicy
from troposphere.template_generator import TemplateGenerator
from utils import generate_stack_name, wait_for_computefleet_changed
from utils import generate_stack_name, get_arn_partition, wait_for_computefleet_changed

from tests.common.assertions import assert_no_errors_in_logs
from tests.schedulers.test_awsbatch import _test_job_submission as _test_job_submission_awsbatch
Expand Down Expand Up @@ -291,7 +291,9 @@ def _get_resource_name_from_resource_arn(resource_arn):
@pytest.mark.usefixtures("os", "instance")
def test_iam_policies(region, scheduler, pcluster_config_reader, clusters_factory):
"""Test IAM Policies"""
cluster_config = pcluster_config_reader(iam_policies=["arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"])
cluster_config = pcluster_config_reader(
iam_policies=[f"arn:{get_arn_partition(region)}:iam::aws:policy/AmazonS3ReadOnlyAccess"]
)
cluster = clusters_factory(cluster_config)
remote_command_executor = RemoteCommandExecutor(cluster)

Expand Down Expand Up @@ -365,7 +367,7 @@ def test_iam_resource_prefix(
)

cluster = clusters_factory(cluster_config, custom_cli_credentials=creds)
_test_iam_resource_in_cluster(cfn_client, iam_client, cluster.name, iam_resource_prefix)
_test_iam_resource_in_cluster(region, cfn_client, iam_client, cluster.name, iam_resource_prefix)


def _update_paramters_and_conditions(parameters, conditions, iam_path, iam_name_prefix):
Expand Down Expand Up @@ -706,11 +708,11 @@ def _split_resource_prefix(resource_prefix):
return None, None


def _check_iam_resource_prefix(resource_arn_list, iam_resource_prefix):
def _check_iam_resource_prefix(region, resource_arn_list, iam_resource_prefix):
"""Check the path and name of IAM resource ( Roles, policy and Instance profiles)."""
iam_path, iam_name_prefix = _split_resource_prefix(iam_resource_prefix)
for resource in resource_arn_list:
if "arn:aws:iam:" in resource:
if f"arn:{get_arn_partition(region)}:iam:" in resource:
if iam_path:
assert_that(resource).contains(iam_path)
else:
Expand All @@ -719,7 +721,7 @@ def _check_iam_resource_prefix(resource_arn_list, iam_resource_prefix):
assert_that(resource).contains(iam_name_prefix)


def _test_iam_resource_in_cluster(cfn_client, iam_client, stack_name, iam_resource_prefix):
def _test_iam_resource_in_cluster(region, cfn_client, iam_client, stack_name, iam_resource_prefix):
"""Test IAM resources by checking the path and name prefix in AWS IAM and check cluster is created."""

# Check for cluster Status
Expand All @@ -744,7 +746,7 @@ def _test_iam_resource_in_cluster(cfn_client, iam_client, stack_name, iam_resour
"Arn"
]
)
_check_iam_resource_prefix(resource_arn_list, iam_resource_prefix)
_check_iam_resource_prefix(region, resource_arn_list, iam_resource_prefix)


@pytest.fixture(scope="class")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ HeadNode:
KeyName: {{ key_name }}
Iam:
AdditionalIamPolicies:
- Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore
- Policy: arn:{{partition}}:iam::aws:policy/AmazonSSMManagedInstanceCore
Scheduling:
Scheduler: slurm
SlurmQueues:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ HeadNode:
KeyName: {{ key_name }}
Iam:
AdditionalIamPolicies:
- Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore
- Policy: arn:{{partition}}:iam::aws:policy/AmazonSSMManagedInstanceCore
Scheduling:
Scheduler: slurm
SlurmQueues:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ HeadNode:
KeyName: {{ key_name }}
Iam:
AdditionalIamPolicies:
- Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore
- Policy: arn:{{partition}}:iam::aws:policy/AmazonSSMManagedInstanceCore
Scheduling:
Scheduler: slurm
SlurmSettings:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ HeadNode:
KeyName: {{ key_name }}
Iam:
AdditionalIamPolicies:
- Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore
- Policy: arn:{{partition}}:iam::aws:policy/AmazonSSMManagedInstanceCore
Scheduling:
Scheduler: slurm
SlurmSettings:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ HeadNode:
KeyName: {{ key_name }}
Iam:
AdditionalIamPolicies:
- Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore
- Policy: arn:{{partition}}:iam::aws:policy/AmazonSSMManagedInstanceCore
Scheduling:
Scheduler: slurm
SlurmQueues:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ HeadNode:
MountDir: {{ head_ephemeral_mount }}
Iam:
AdditionalIamPolicies:
- Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore
- Policy: arn:{{partition}}:iam::aws:policy/AmazonSSMManagedInstanceCore
Scheduling:
Scheduler: {{ scheduler }}
{% if scheduler == "awsbatch" %}AwsBatchQueues:{% else %}SlurmQueues:{% endif %}
Expand Down
4 changes: 3 additions & 1 deletion tests/integration-tests/tests/update/test_update.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,9 @@ def test_update_slurm(region, pcluster_config_reader, s3_bucket_factory, cluster
job_id = slurm_commands.assert_job_submitted(result.stdout)

# Update cluster with new configuration
additional_policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonAppStreamServiceAccess"
additional_policy_arn = (
f"arn:{utils.get_arn_partition(region)}:iam::aws:policy/service-role/AmazonAppStreamServiceAccess"
)
updated_config_file = pcluster_config_reader(
config_file="pcluster.config.update.yaml",
output_file="pcluster.config.update.successful.yaml",
Expand Down

0 comments on commit 19f4b8c

Please sign in to comment.