diff --git a/README.md b/README.md index bcd54d887..995792549 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,8 @@ For more information about the default configuration, see [Default Secure Landin ## Overview * [terraform-ibm-landing-zone](#terraform-ibm-landing-zone) * [Examples](./examples) - * [One VPC with one VSI example](./examples/one-vpc-one-vsi) + * [One VPC with one VSI](./examples/one-vpc-one-vsi) + * [One VSI with one IKS](./examples/one-vsi-one-iks) * [Override.json example](./examples/override-example) * [Contributing](#contributing) @@ -836,6 +837,44 @@ module "cluster_pattern" { } ``` +## Required IAM access policies + + + + + + + + + + + +## Examples + +- [ One VPC with one VSI](examples/one-vpc-one-vsi) +- [ One VSI with one IKS](examples/one-vsi-one-iks) +- [ Override.json example](examples/override-example) + + ### Requirements @@ -851,6 +890,7 @@ module "cluster_pattern" { | Name | Source | Version | |------|--------|---------| | [bastion\_host](#module\_bastion\_host) | terraform-ibm-modules/landing-zone-vsi/ibm | 3.3.0 | +| [cluster](#module\_cluster) | terraform-ibm-modules/base-ocp-vpc/ibm | 3.27.0 | | [dynamic\_values](#module\_dynamic\_values) | ./dynamic_values | n/a | | [f5\_vsi](#module\_f5\_vsi) | terraform-ibm-modules/landing-zone-vsi/ibm | 3.3.0 | | [key\_management](#module\_key\_management) | ./kms | n/a | @@ -908,8 +948,8 @@ module "cluster_pattern" { |------|-------------|------|---------|:--------:| | [appid](#input\_appid) | The App ID instance to be used for the teleport vsi deployments |
object({
name = optional(string)
resource_group = optional(string)
use_data = optional(bool)
keys = optional(list(string))
use_appid = bool
})
|
{
"use_appid": false
}
| no | | [atracker](#input\_atracker) | atracker variables |
object({
resource_group = string
receive_global_events = bool
collector_bucket_name = string
add_route = bool
})
| n/a | yes | -| [clusters](#input\_clusters) | A list describing clusters workloads to create |
list(
object({
name = string # Name of Cluster
vpc_name = string # Name of VPC
subnet_names = list(string) # List of vpc subnets for cluster
workers_per_subnet = number # Worker nodes per subnet.
machine_type = string # Worker node flavor
kube_type = string # iks or openshift
kube_version = optional(string) # Can be a version from `ibmcloud ks versions` or `default`
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
pod_subnet = optional(string) # Portable subnet for pods
service_subnet = optional(string) # Portable subnet for services
resource_group = string # Resource Group used for cluster
cos_name = optional(string) # Name of COS instance Required only for OpenShift clusters
access_tags = optional(list(string), [])
boot_volume_crk_name = optional(string) # Boot volume encryption key name
disable_public_endpoint = optional(bool, true) # disable cluster public, leaving only private endpoint
disable_outbound_traffic_protection = optional(bool, false) # public outbound access from the cluster workers
cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion
operating_system = optional(string, null) #The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
kms_wait_for_apply = optional(bool, true) # make terraform wait until KMS is applied to master and it is ready and deployed
addons = optional(object({ # Map of OCP cluster add-on versions to install
debug-tool = optional(string)
image-key-synchronizer = optional(string)
openshift-data-foundation = optional(string)
vpc-file-csi-driver = optional(string)
static-route = optional(string)
cluster-autoscaler = optional(string)
vpc-block-csi-driver = optional(string)
ibm-storage-operator = optional(string)
}), {})
manage_all_addons = optional(bool, false) # Instructs Terraform to manage all cluster addons, even if addons were installed outside of the module. If set to 'true' this module will destroy any addons that were installed by other sources.
kms_config = optional(
object({
crk_name = string # Name of key
private_endpoint = optional(bool) # Private endpoint
})
)

worker_pools = optional(
list(
object({
name = string # Worker pool name
vpc_name = string # VPC name
workers_per_subnet = number # Worker nodes per subnet
flavor = string # Worker node flavor
subnet_names = list(string) # List of vpc subnets for worker pool
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
boot_volume_crk_name = optional(string) # Boot volume encryption key name
operating_system = optional(string) # The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
})
)
)
})
)
| n/a | yes | -| [cos](#input\_cos) | Object describing the cloud object storage instance, buckets, and keys. Set `use_data` to false to create instance |
list(
object({
name = string
use_data = optional(bool)
resource_group = string
plan = optional(string)
random_suffix = optional(bool) # Use a random suffix for COS instance
access_tags = optional(list(string), [])
skip_kms_s2s_auth_policy = optional(bool, false) # skip auth policy between this instance and kms instance, useful if existing resources are used
skip_flowlogs_s2s_auth_policy = optional(bool, false) # skip auth policy between flow logs service and this instance, set to true if this policy is already in place on account
skip_atracker_s2s_auth_policy = optional(bool, false) # skip auth policyt between atracker service and this instance, set to true if this is existing recipient of atracker already
buckets = list(object({
name = string
storage_class = string
endpoint_type = string
force_delete = bool
single_site_location = optional(string)
region_location = optional(string)
cross_region_location = optional(string)
kms_key = optional(string)
access_tags = optional(list(string), [])
allowed_ip = optional(list(string), [])
hard_quota = optional(number)
archive_rule = optional(object({
days = number
enable = bool
rule_id = optional(string)
type = string
}))
expire_rule = optional(object({
days = optional(number)
date = optional(string)
enable = bool
expired_object_delete_marker = optional(string)
prefix = optional(string)
rule_id = optional(string)
}))
activity_tracking = optional(object({
activity_tracker_crn = string
read_data_events = bool
write_data_events = bool
management_events = bool
}))
metrics_monitoring = optional(object({
metrics_monitoring_crn = string
request_metrics_enabled = optional(bool)
usage_metrics_enabled = optional(bool)
}))
}))
keys = optional(
list(object({
name = string
role = string
enable_HMAC = bool
}))
)

})
)
| n/a | yes | +| [clusters](#input\_clusters) | A list describing cluster workloads to create |
list(
object({
name = string # Name of Cluster
vpc_name = string # Name of VPC
subnet_names = list(string) # List of vpc subnets for cluster
workers_per_subnet = number # Worker nodes per subnet.
machine_type = string # Worker node flavor
kube_type = string # iks or openshift
kube_version = optional(string) # Can be a version from `ibmcloud ks versions` or `default`
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
pod_subnet = optional(string) # Portable subnet for pods
service_subnet = optional(string) # Portable subnet for services
resource_group = string # Resource Group used for cluster
cos_name = optional(string) # Name of COS instance. Required only for OpenShift clusters&#10;
access_tags = optional(list(string), [])
boot_volume_crk_name = optional(string) # Boot volume encryption key name
disable_public_endpoint = optional(bool, true) # disable the cluster's public endpoint, leaving only the private endpoint&#10;
disable_outbound_traffic_protection = optional(bool, false) # allow public outbound access from the cluster workers&#10;
cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion
verify_worker_network_readiness = optional(bool) # Flag to run a script that runs kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, set this to false.&#10;
use_private_endpoint = optional(bool, false) # Flag to force all cluster-related API calls to use the IBM Cloud private endpoints.&#10;
minimum_size = optional(number) # Minimum number of worker nodes per zone that the cluster autoscaler can scale down the worker pool to.
maximum_size = optional(number) # Maximum number of worker nodes per zone that the cluster autoscaler can scale up the worker pool to.
enable_autoscaling = optional(bool, false) # Flag to set cluster autoscaler to manage scaling for the worker pool.
addons = optional(object({ # Map of OCP cluster add-on versions to install
debug-tool = optional(string)
image-key-synchronizer = optional(string)
openshift-data-foundation = optional(string)
vpc-file-csi-driver = optional(string)
static-route = optional(string)
cluster-autoscaler = optional(string)
vpc-block-csi-driver = optional(string)
}), {})
manage_all_addons = optional(bool, false) # Instructs Terraform to manage all cluster addons, even if addons were installed outside of the module. If set to 'true' this module will destroy any addons that were installed by other sources.
kms_config = optional(
object({
crk_name = string # Name of key
private_endpoint = optional(bool) # Private endpoint
})
)
worker_pools = optional(
list(
object({
name = string # Worker pool name
vpc_name = string # VPC name
workers_per_subnet = number # Worker nodes per subnet
flavor = string # Worker node flavor
subnet_names = list(string) # List of vpc subnets for worker pool
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
boot_volume_crk_name = optional(string) # Boot volume encryption key name
minimum_size = optional(number) # Minimum number of worker nodes per zone that the cluster autoscaler can scale down the worker pool to.
maximum_size = optional(number) # Maximum number of worker nodes per zone that the cluster autoscaler can scale up the worker pool to.
enable_autoscaling = optional(bool, false) # Flag to set cluster autoscaler to manage scaling for the worker pool.
})
)
)
})
)
| n/a | yes | +| [cos](#input\_cos) | Object describing the cloud object storage instance, buckets, and keys. Set `use_data` to false to create instance |
list(
object({
name = string
use_data = optional(bool)
resource_group = string
plan = optional(string)
random_suffix = optional(bool) # Use a random suffix for COS instance
access_tags = optional(list(string), [])
buckets = list(object({
name = string
storage_class = string
endpoint_type = string
force_delete = bool
single_site_location = optional(string)
region_location = optional(string)
cross_region_location = optional(string)
kms_key = optional(string)
access_tags = optional(list(string), [])
allowed_ip = optional(list(string), [])
hard_quota = optional(number)
archive_rule = optional(object({
days = number
enable = bool
rule_id = optional(string)
type = string
}))
expire_rule = optional(object({
days = optional(number)
date = optional(string)
enable = bool
expired_object_delete_marker = optional(string)
prefix = optional(string)
rule_id = optional(string)
}))
activity_tracking = optional(object({
activity_tracker_crn = string
read_data_events = bool
write_data_events = bool
}))
metrics_monitoring = optional(object({
metrics_monitoring_crn = string
request_metrics_enabled = optional(bool)
usage_metrics_enabled = optional(bool)
}))
}))
keys = optional(
list(object({
name = string
role = string
enable_HMAC = bool
}))
)

})
)
| n/a | yes | | [enable\_transit\_gateway](#input\_enable\_transit\_gateway) | Create transit gateway | `bool` | `true` | no | | [f5\_template\_data](#input\_f5\_template\_data) | Data for all f5 templates |
object({
tmos_admin_password = optional(string)
license_type = optional(string)
byol_license_basekey = optional(string)
license_host = optional(string)
license_username = optional(string)
license_password = optional(string)
license_pool = optional(string)
license_sku_keyword_1 = optional(string)
license_sku_keyword_2 = optional(string)
license_unit_of_measure = optional(string)
do_declaration_url = optional(string)
as3_declaration_url = optional(string)
ts_declaration_url = optional(string)
phone_home_url = optional(string)
template_source = optional(string)
template_version = optional(string)
app_id = optional(string)
tgactive_url = optional(string)
tgstandby_url = optional(string)
tgrefresh_url = optional(string)
})
|
{
"license_type": "none"
}
| no | | [f5\_vsi](#input\_f5\_vsi) | A list describing F5 VSI workloads to create |
list(
object({
name = string
vpc_name = string
primary_subnet_name = string
secondary_subnet_names = list(string)
secondary_subnet_security_group_names = list(
object({
group_name = string
interface_name = string
})
)
ssh_keys = list(string)
f5_image_name = string
machine_type = string
resource_group = optional(string)
enable_management_floating_ip = optional(bool)
enable_external_floating_ip = optional(bool)
security_groups = optional(list(string))
boot_volume_encryption_key_name = optional(string)
hostname = string
domain = string
access_tags = optional(list(string), [])
security_group = optional(
object({
name = string
rules = list(
object({
name = string
direction = string
source = string
tcp = optional(
object({
port_max = number
port_min = number
})
)
udp = optional(
object({
port_max = number
port_min = number
})
)
icmp = optional(
object({
type = number
code = number
})
)
})
)
})
)
block_storage_volumes = optional(list(
object({
name = string
profile = string
capacity = optional(number)
iops = optional(number)
encryption_key = optional(string)
})
))
load_balancers = optional(list(
object({
name = string
type = string
listener_port = number
listener_protocol = string
connection_limit = number
algorithm = string
protocol = string
health_delay = number
health_retries = number
health_timeout = number
health_type = string
pool_member_port = string
idle_connection_timeout = optional(number)
security_group = optional(
object({
name = string
rules = list(
object({
name = string
direction = string
source = string
tcp = optional(
object({
port_max = number
port_min = number
})
)
udp = optional(
object({
port_max = number
port_min = number
})
)
icmp = optional(
object({
type = number
code = number
})
)
})
)
})
)
})
))
})
)
| `[]` | no | diff --git a/cluster.tf b/cluster.tf index 6c00ddc45..a220c9fcd 100644 --- a/cluster.tf +++ b/cluster.tf @@ -26,18 +26,46 @@ locals { openshift = "${data.ibm_container_cluster_versions.cluster_versions.default_openshift_version}_openshift" iks = data.ibm_container_cluster_versions.cluster_versions.default_kube_version } -} -############################################################################## + cluster_data = merge({ + for cluster in ibm_container_vpc_cluster.cluster : + cluster.name => { + crn = cluster.crn + id = cluster.id + resource_group_name = cluster.resource_group_name + resource_group_id = cluster.resource_group_id + vpc_id = cluster.vpc_id + region = var.region + private_service_endpoint_url = cluster.private_service_endpoint_url + public_service_endpoint_url = (cluster.public_service_endpoint_url != "" && cluster.public_service_endpoint_url != null) ? cluster.public_service_endpoint_url : null + ingress_hostname = cluster.ingress_hostname + cluster_console_url = (cluster.public_service_endpoint_url != "" && cluster.public_service_endpoint_url != null) ? "https://console-openshift-console.${cluster.ingress_hostname}" : null + + } + }, { + for cluster in module.cluster : + cluster.cluster_name => { + crn = cluster.cluster_crn + id = cluster.cluster_id + resource_group_id = cluster.resource_group_id + vpc_id = cluster.vpc_id + region = var.region + } + } + ) +} ############################################################################## -# Create IKS/ROKS on VPC Cluster +# Create IKS on VPC Cluster ############################################################################## resource "ibm_container_vpc_cluster" "cluster" { - depends_on = [ibm_iam_authorization_policy.policy] - for_each = local.clusters_map + depends_on = [ibm_iam_authorization_policy.policy] + for_each = { + for index, cluster in local.clusters_map : index => cluster + if cluster.kube_type == "iks" + } name = "${var.prefix}-${each.value.name}" vpc_id = each.value.vpc_id resource_group_id = local.resource_groups[each.value.resource_group] @@ -96,7 +124,10 @@ resource "ibm_container_vpc_cluster" "cluster" { } resource "ibm_resource_tag" "cluster_tag" { - for_each = local.clusters_map + for_each = { + for index, cluster in local.clusters_map : index => cluster + if cluster.kube_type == "iks" + } resource_id = ibm_container_vpc_cluster.cluster[each.key].crn tag_type = "access" tags = each.value.access_tags @@ -106,11 +137,14 @@ resource "ibm_resource_tag" "cluster_tag" { ############################################################################## -# Create Worker Pools +# Create IKS Worker Pools ############################################################################## resource "ibm_container_vpc_worker_pool" "pool" { - for_each = local.worker_pools_map + for_each = { + for index, cluster in local.worker_pools_map : index => cluster + if cluster.kube_type == "iks" + } vpc_id = each.value.vpc_id resource_group_id = local.resource_groups[each.value.resource_group] entitlement = each.value.entitlement @@ -157,14 +191,14 @@ locals { # for each cluster in the clusters_map, get the addons and their versions and create an addons map including the corosponding csi_driver_version cluster_addons = { - for cluster in var.clusters : "${var.prefix}-${cluster.name}" => { + for cluster in local.clusters_map : "${var.prefix}-${cluster.name}" => { id = ibm_container_vpc_cluster.cluster["${var.prefix}-${cluster.name}"].id resource_group_id = 
ibm_container_vpc_cluster.cluster["${var.prefix}-${cluster.name}"].resource_group_id addons = merge( { for addon_name, addon_version in(cluster.addons != null ? cluster.addons : {}) : addon_name => addon_version if addon_version != null }, local.csi_driver_version["${var.prefix}-${cluster.name}"] != null ? { vpc-block-csi-driver = local.csi_driver_version["${var.prefix}-${cluster.name}"] } : {} ) - } + } if cluster.kube_type == "iks" } } @@ -193,3 +227,71 @@ resource "ibm_container_addons" "addons" { create = "1h" } } + + +############################################################################## +# Create ROKS on VPC Cluster +############################################################################## + +module "cluster" { + for_each = { + for index, cluster in local.clusters_map : index => cluster + if cluster.kube_type == "openshift" + } + source = "terraform-ibm-modules/base-ocp-vpc/ibm" + version = "3.27.0" + resource_group_id = local.resource_groups[each.value.resource_group] + region = var.region + cluster_name = each.value.cluster_name + vpc_id = each.value.vpc_id + ocp_entitlement = each.value.entitlement + vpc_subnets = each.value.vpc_subnets + access_tags = each.value.access_tags + worker_pools = concat( + [ + { + subnet_prefix = each.value.subnet_names[0] + pool_name = "default" + machine_type = each.value.machine_type + workers_per_zone = each.value.workers_per_subnet + minSize = each.value.minimum_size + maxSize = each.value.maximum_size + enableAutoscaling = each.value.enable_autoscaling + boot_volume_encryption_kms_config = { + crk = module.key_management.key_map[each.value.kms_config.crk_name].key_id + kms_instance_id = module.key_management.key_management_guid + } + } + ], + each.value.worker != null ? [ + for pool in each.value.worker : + { + vpc_subnets = pool.vpc_subnets + pool_name = pool.name + machine_type = pool.flavor + workers_per_zone = pool.workers_per_subnet + minSize = each.value.minimum_size + maxSize = each.value.maximum_size + enableAutoscaling = each.value.enable_autoscaling + boot_volume_encryption_kms_config = { + crk = module.key_management.key_map[each.value.kms_config.crk_name].key_id + kms_instance_id = module.key_management.key_management_guid + } + } + ] : [] + ) + ocp_version = each.value.kube_version + tags = var.tags + use_existing_cos = true + disable_public_endpoint = each.value.disable_public_endpoint != null ? each.value.disable_public_endpoint : true + verify_worker_network_readiness = each.value.verify_worker_network_readiness != null ? 
each.value.verify_worker_network_readiness : false + existing_cos_id = each.value.cos_instance_crn + use_private_endpoint = each.value.use_private_endpoint + addons = each.value.addons + manage_all_addons = each.value.manage_all_addons + disable_outbound_traffic_protection = each.value.disable_outbound_traffic_protection + kms_config = { + instance_id = module.key_management.key_management_guid + crk_id = module.key_management.key_map[each.value.kms_config.crk_name].key_id + } +} diff --git a/dynamic_values/cluster.tf b/dynamic_values/cluster.tf index 58e0f116b..f2f8fc8c5 100644 --- a/dynamic_values/cluster.tf +++ b/dynamic_values/cluster.tf @@ -21,24 +21,34 @@ module "ut_cluster_map" { prefix = "ut" clusters = [ { - name = "test-cluster" - vpc_name = "test" - subnet_names = ["subnet-1", "subnet-3"] - resource_group = "test-resource-group" - kube_type = "openshift" - cos_name = "data-cos" - entitlement = "cloud_pak" - secondary_storage = "300gb.5iops-tier" + name = "test-cluster" + vpc_name = "test" + subnet_names = ["subnet-1", "subnet-3"] + resource_group = "test-resource-group" + kube_type = "openshift" + cos_name = "data-cos" + disable_public_endpoint = false + use_private_endpoint = false + verify_worker_network_readiness = false + minimum_size = 1 + maximum_size = 2 + enable_autoscaling = false + entitlement = "cloud_pak" + secondary_storage = "300gb.5iops-tier" worker_pools = [ { - name = "logging-worker-pool" + name = "default" vpc_name = "test" subnet_names = ["subnet-1", "subnet-3"] workers_per_subnet = 2 + minimum_size = 1 + maximum_size = 2 + enable_autoscaling = true flavor = "spicy" secondary_storage = "300gb.5iops-tier" } ] + vpc_subnets = {} } ] cos_instance_ids = { @@ -66,6 +76,7 @@ module "ut_cluster_map" { cidr = "3" }, ] + subnet_detail_list = null } } } diff --git a/dynamic_values/config_modules/cluster_worker_pools/worker_pools.tf b/dynamic_values/config_modules/cluster_worker_pools/worker_pools.tf index 237a69a5e..8d730218b 100644 --- a/dynamic_values/config_modules/cluster_worker_pools/worker_pools.tf +++ b/dynamic_values/config_modules/cluster_worker_pools/worker_pools.tf @@ -85,6 +85,7 @@ locals { resource_group = cluster.resource_group # add cluster rg vpc_id = var.vpc_modules[pool.vpc_name].vpc_id # add vpc_id subnets = module.worker_pool_subnets["${var.prefix}-${cluster.name}-${pool.name}"].subnets + kube_type = cluster.kube_type }) if pool != null ] if cluster.worker_pools != null ] diff --git a/dynamic_values/config_modules/clusters/clusters.tf b/dynamic_values/config_modules/clusters/clusters.tf index af03c8e22..b4569803d 100644 --- a/dynamic_values/config_modules/clusters/clusters.tf +++ b/dynamic_values/config_modules/clusters/clusters.tf @@ -50,6 +50,13 @@ module "cluster_subnets" { ############################################################################## +module "worker_pools" { + source = "../cluster_worker_pools" + prefix = var.prefix + clusters = var.clusters + vpc_modules = var.vpc_modules +} + ############################################################################## # Cluster List To Map ############################################################################## @@ -62,6 +69,31 @@ module "composed_cluster_map" { vpc_id = var.vpc_modules[cluster.vpc_name].vpc_id subnets = module.cluster_subnets[cluster.name].subnets cos_instance_crn = cluster.kube_type == "openshift" ? 
var.cos_instance_ids[cluster.cos_name] : null + cluster_name = "${var.prefix}-${cluster.name}" + vpc_subnets = { + (cluster.subnet_names[0]) = [ + for zone in module.cluster_subnets[cluster.name].subnets : + { + id = zone.id + zone = zone.zone + cidr_block = zone.cidr + } + ] + + } + worker = cluster.worker_pools != null ? [ + for pool in cluster.worker_pools : + merge(module.worker_pools.map["${var.prefix}-${cluster.name}-${pool.name}"], { + vpc_subnets = [ + for zone in module.worker_pools.map["${var.prefix}-${cluster.name}-${pool.name}"].subnets : + { + id = zone.id + zone = zone.zone + cidr_block = zone.cidr + } + ] + }) if pool != null + ] : [] }) ] prefix = var.prefix diff --git a/examples/one-vpc-one-vsi/README.md b/examples/one-vpc-one-vsi/README.md index ccf6ebdd4..40e6bba6e 100644 --- a/examples/one-vpc-one-vsi/README.md +++ b/examples/one-vpc-one-vsi/README.md @@ -1,4 +1,4 @@ -# One VPC with one VSI example +# One VPC with one VSI The example shows how you can use the landing zone module to create a basic, minimal topology by using an `override.json` file. diff --git a/examples/one-vsi-one-iks/README.md b/examples/one-vsi-one-iks/README.md new file mode 100644 index 000000000..b721b3e55 --- /dev/null +++ b/examples/one-vsi-one-iks/README.md @@ -0,0 +1,16 @@ +# One VSI with one IKS + +This example demonstrates how to configure the landing zone module by using an `override.json` file. The example builds on the default topology that is defined in the mixed pattern, and uses a JSON file to override the default configuration. + +The example deploys the following infrastructure: + +- A jump server VSI in the management VPC, exposing a public floating IP address. +- An IKS cluster in the workload VPC. + +:exclamation: **Important:** This example shows a basic topology. The topology is not highly available or validated for the IBM Cloud Framework for Financial Services. + +Example usage: +``` +export TF_VAR_ibmcloud_api_key= # pragma: allowlist secret +terraform apply -var=ssh_key='ssh-rsa ...' -var=region=eu-gb -var=prefix=my-slz +``` diff --git a/examples/one-vsi-one-iks/main.tf b/examples/one-vsi-one-iks/main.tf new file mode 100644 index 000000000..504669ff5 --- /dev/null +++ b/examples/one-vsi-one-iks/main.tf @@ -0,0 +1,13 @@ +############################################################################## +# Landing Zone VSI Pattern +############################################################################## + +module "landing_zone" { + source = "../../patterns/mixed" + prefix = var.prefix + region = var.region + ibmcloud_api_key = var.ibmcloud_api_key + ssh_public_key = var.ssh_key + override = true + tags = var.resource_tags +} diff --git a/examples/one-vsi-one-iks/outputs.tf b/examples/one-vsi-one-iks/outputs.tf new file mode 100644 index 000000000..dbfad1da6 --- /dev/null +++ b/examples/one-vsi-one-iks/outputs.tf @@ -0,0 +1,8 @@ +############################################################################## +# Outputs +############################################################################## + +output "landing_zone" { + value = module.landing_zone + description = "Landing zone configuration" +} diff --git a/examples/one-vsi-one-iks/override.json b/examples/one-vsi-one-iks/override.json new file mode 100644 index 000000000..98e9024fa --- /dev/null +++ b/examples/one-vsi-one-iks/override.json @@ -0,0 +1,357 @@ +{ + "atracker": { + "collector_bucket_name": "atracker-bucket", + "receive_global_events": true, + "resource_group": "slz-service-rg", + "add_route": false + }, + "clusters": [ + { + "cos_name": "cos", + "entitlement": "cloud_pak", + "kube_type": "iks", + "kube_version": "default", + "machine_type": "bx2.16x64", + "name": "workload-cluster", + "resource_group": "slz-workload-rg", + "disable_public_endpoint": true, + "use_private_endpoint": false, + "verify_worker_network_readiness": false, + "kms_config": { + "crk_name": "slz-roks-key", + "private_endpoint": true + }, + "subnet_names": [ + "vsi-zone-1", + "vsi-zone-2", + "vsi-zone-3" + ], + "vpc_name": "workload", + "worker_pools": [ + { + "entitlement": "cloud_pak", + "flavor": "bx2.16x64", + "name": "logging-worker-pool", + "subnet_names": [ + "vsi-zone-1", + "vsi-zone-3" + ], + "vpc_name": "workload", + "workers_per_subnet": 2, + "minimum_size": 2, + "maximum_size": 3, + "enable_autoscaling": true + } + ], + "workers_per_subnet": 2, + "minimum_size": 2, + "maximum_size": 3, + "enable_autoscaling": true + } + ], + "cos": [ + { + "buckets": [ + { + "endpoint_type": "public", + "force_delete": true, + "kms_key": "slz-atracker-key", + "name": "atracker-bucket", + "storage_class": "standard" + } + ], + "keys": [ + { + "name": "cos-bind-key", + "role": "Writer", + "enable_HMAC": false + } + ], + "name": "atracker-cos", + "plan": "standard", + "random_suffix": true, + "resource_group": "slz-service-rg", + "use_data": false + }, + { + "buckets": [ + { + "endpoint_type": "public", + "force_delete": true, + "kms_key": "slz-slz-key", + "name": "management-bucket", + "storage_class": "standard" + }, + { + "endpoint_type": "public", + "force_delete": true, + "kms_key": "slz-slz-key", + "name": "workload-bucket", + "storage_class": "standard" + } + ], + "keys": [], + "name": "cos", + "plan": "standard", + "random_suffix": true, + "resource_group": "slz-service-rg", + "use_data": false + } + ], + "enable_transit_gateway": true, + "key_management": { + "keys": [ + { + "key_ring": "slz-slz-ring", + "name": "slz-slz-key", + "root_key": true + }, + { + "key_ring": "slz-slz-ring", + "name": "slz-atracker-key", + "root_key":
true + }, + { + "key_ring": "slz-slz-ring", + "name": "slz-vsi-volume-key", + "root_key": true + }, + { + "key_ring": "slz-slz-ring", + "name": "slz-roks-key", + "root_key": true + } + ], + "name": "slz-slz-kms", + "resource_group": "slz-service-rg", + "use_hs_crypto": false + }, + "resource_groups": [ + { + "create": true, + "name": "slz-service-rg", + "use_prefix": true + }, + { + "create": true, + "name": "slz-management-rg", + "use_prefix": true + }, + { + "create": true, + "name": "slz-workload-rg", + "use_prefix": true + } + ], + "security_groups": [], + "security_compliance_center": { + "enable_scc": false, + "is_public": false, + "location_id": "us", + "scope_name": "slz-scope", + "scope_description": "slz-scope-description" + }, + "service_endpoints": "private", + "transit_gateway_connections": [ + "management", + "workload" + ], + "transit_gateway_resource_group": "slz-service-rg", + "virtual_private_endpoints": [ + { + "service_name": "cos", + "service_type": "cloud-object-storage", + "resource_group": "slz-service-rg", + "vpcs": [ + { + "name": "workload", + "subnets": [ + "vpe-zone-1", + "vpe-zone-2", + "vpe-zone-3" + ] + } + ] + } + ], + "vpcs": [ + { + "address_prefixes": { + "zone-1": [], + "zone-2": [], + "zone-3": [] + }, + "default_security_group_rules": [], + "flow_logs_bucket_name": "management-bucket", + "network_acls": [ + { + "add_cluster_rules": true, + "name": "management-acl", + "rules": [ + { + "action": "allow", + "destination": "0.0.0.0/0", + "direction": "inbound", + "name": "allow-all-inbound", + "source": "0.0.0.0/0" + }, + { + "action": "allow", + "destination": "0.0.0.0/0", + "direction": "outbound", + "name": "allow-all-outbound", + "source": "0.0.0.0/0" + } + ] + } + ], + "prefix": "management", + "resource_group": "slz-management-rg", + "subnets": { + "zone-1": [ + { + "acl_name": "management-acl", + "cidr": "10.10.10.0/24", + "name": "vsi-zone-1", + "public_gateway": false + } + ], + "zone-2": null, + "zone-3": null + }, + "use_public_gateways": { + "zone-1": false, + "zone-2": false, + "zone-3": false + } + }, + { + "address_prefixes": { + "zone-1": [], + "zone-2": [], + "zone-3": [] + }, + "default_security_group_rules": [], + "flow_logs_bucket_name": "workload-bucket", + "network_acls": [ + { + "add_ibm_cloud_internal_rules": true, + "add_vpc_connectivity_rules": true, + "prepend_ibm_rules": true, + "add_cluster_rules": true, + "name": "workload-acl", + "rules": [ + { + "action": "allow", + "destination": "10.0.0.0/8", + "direction": "inbound", + "name": "allow-ibm-inbound", + "source": "161.26.0.0/16" + }, + { + "action": "allow", + "destination": "10.0.0.0/8", + "direction": "inbound", + "name": "allow-all-network-inbound", + "source": "10.0.0.0/8" + }, + { + "action": "allow", + "destination": "161.26.0.0/16", + "direction": "outbound", + "name": "allow-all-outbound", + "source": "10.0.0.0/8" + } + ] + } + ], + "prefix": "workload", + "resource_group": "slz-workload-rg", + "subnets": { + "zone-1": [ + { + "acl_name": "workload-acl", + "cidr": "10.40.10.0/24", + "name": "vsi-zone-1", + "public_gateway": false + }, + { + "acl_name": "workload-acl", + "cidr": "10.40.20.0/24", + "name": "vpe-zone-1", + "public_gateway": false + } + ], + "zone-2": [ + { + "acl_name": "workload-acl", + "cidr": "10.50.10.0/24", + "name": "vsi-zone-2", + "public_gateway": false + }, + { + "acl_name": "workload-acl", + "cidr": "10.50.20.0/24", + "name": "vpe-zone-2", + "public_gateway": false + } + ], + "zone-3": [ + { + "acl_name": "workload-acl", + "cidr": 
"10.60.10.0/24", + "name": "vsi-zone-3", + "public_gateway": false + }, + { + "acl_name": "workload-acl", + "cidr": "10.60.20.0/24", + "name": "vpe-zone-3", + "public_gateway": false + } + ] + }, + "use_public_gateways": { + "zone-1": false, + "zone-2": false, + "zone-3": false + } + } + ], + "vpn_gateways": [], + "vsi": [ + { + "boot_volume_encryption_key_name": "slz-vsi-volume-key", + "image_name": "ibm-ubuntu-18-04-6-minimal-amd64-2", + "machine_type": "cx2-4x8", + "name": "management-server", + "resource_group": "slz-management-rg", + "enable_floating_ip": true, + "security_group": { + "name": "management", + "rules": [ + { + "direction": "inbound", + "name": "allow-all-inbound", + "source": "0.0.0.0/0" + }, + { + "direction": "outbound", + "name": "allow-all-outbound", + "source": "0.0.0.0/0" + } + ], + "vpc_name": "management" + }, + "ssh_keys": [ + "ssh-key" + ], + "subnet_names": [ + "vsi-zone-1" + ], + "vpc_name": "management", + "vsi_per_subnet": 1 + } + ], + "wait_till": "IngressReady" +} diff --git a/examples/one-vsi-one-iks/provider.tf b/examples/one-vsi-one-iks/provider.tf new file mode 100644 index 000000000..df45ef50b --- /dev/null +++ b/examples/one-vsi-one-iks/provider.tf @@ -0,0 +1,4 @@ +provider "ibm" { + ibmcloud_api_key = var.ibmcloud_api_key + region = var.region +} diff --git a/examples/one-vsi-one-iks/variables.tf b/examples/one-vsi-one-iks/variables.tf new file mode 100644 index 000000000..ae056478e --- /dev/null +++ b/examples/one-vsi-one-iks/variables.tf @@ -0,0 +1,37 @@ +############################################################################## +# Account Variables +############################################################################## + +variable "ibmcloud_api_key" { + description = "The IBM Cloud platform API key needed to deploy IAM enabled resources." + type = string + sensitive = true +} + +variable "prefix" { + description = "A unique identifier for resources. Must begin with a lowercase letter and end with a lowerccase letter or number. This prefix will be prepended to any resources provisioned by this template. Prefixes must be 16 or fewer characters." + type = string + default = "mixed-iks" + + validation { + error_message = "Prefix must begin with a lowercase letter and contain only lowercase letters, numbers, and - characters. Prefixes must end with a lowercase letter or number and be 16 or fewer characters." + condition = can(regex("^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$", var.prefix)) && length(var.prefix) <= 16 + } +} + +variable "region" { + description = "Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions." + type = string + default = "au-syd" +} + +variable "ssh_key" { + description = "Public SSH Key for VSI creation. Must be a valid SSH key that does not already exist in the deployment region." 
+ type = string +} + +variable "resource_tags" { + type = list(string) + description = "Optional list of tags to be added to created resources" + default = [] +} diff --git a/examples/one-vsi-one-iks/version.tf b/examples/one-vsi-one-iks/version.tf new file mode 100644 index 000000000..61acc7e99 --- /dev/null +++ b/examples/one-vsi-one-iks/version.tf @@ -0,0 +1,9 @@ +terraform { + required_version = ">= 1.3" + required_providers { + ibm = { + source = "IBM-Cloud/ibm" + version = "1.66.0" + } + } +} diff --git a/ibm_catalog.json b/ibm_catalog.json index 21837b65c..67d2a8bb5 100644 --- a/ibm_catalog.json +++ b/ibm_catalog.json @@ -51,9 +51,7 @@ "name": "quickstart", "install_type": "fullstack", "working_directory": "patterns/vsi-quickstart", - "compliance": { - - }, + "compliance": {}, "release_notes_url": "https://cloud.ibm.com/docs/secure-infrastructure-vpc?topic=secure-infrastructure-vpc-secure-infrastructure-vpc-relnotes", "configuration": [ { @@ -1108,28 +1106,22 @@ "key": "kube_version", "type": "string", "required": true, - "default_value": "4.15_openshift", + "default_value": "4.15", "options": [ { - "displayname": "4.12_openshift", - "value": "4.12_openshift" - }, - { - "displayname": "4.13_openshift", - "value": "4.13_openshift" + "displayname": "4.14", + "value": "4.14" }, { - "displayname": "4.14_openshift", - "value": "4.14_openshift" + "displayname": "4.13", + "value": "4.13" }, { - "displayname": "4.15_openshift", - "value": "4.15_openshift" + "displayname": "4.12", + "value": "4.12" } ], - "custom_config": { - - } + "custom_config": {} }, { "hidden": true, @@ -1304,7 +1296,6 @@ "hidden": true, "key": "use_existing_appid" }, - { "hidden": true, "key": "teleport_version" diff --git a/migration/migration_v4.4.0_to_v5.0.0.sh b/migration/migration_v4.4.0_to_v5.0.0.sh new file mode 100644 index 000000000..54fac4be4 --- /dev/null +++ b/migration/migration_v4.4.0_to_v5.0.0.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +### NOTE: Make sure the clusters are openshift clusters as the SLZ has been updated to only support ROKS cluster creation using module. If there are IKS cluster being tracked in the state file please use the terraform cli to manually move the ROKS clusters. Please refer to the https://github.com/terraform-ibm-modules/terraform-ibm-landing-zone/blob/extend-output/migration/update-version.md on steps to run terraform cli commands. + +# Run Terraform init to import new modules +terraform init + +# Get list of old clusters to be moved +old_clusters="$(terraform state list | grep "ibm_container_vpc_cluster.cluster")" + +# Loop to get the cluster name and run terraform mv command +for cluster in "${old_clusters[@]}"; do + pre="$(echo "$cluster" | rev | cut -d '.' 
-f 3- | rev)" + cluster_name="$(echo "$cluster" | awk -F'["]' '{for(i=2;i<=NF;i+=2) print $i}')" + terraform state mv "$cluster" "$pre.module.cluster[\"$cluster_name\"].ibm_container_vpc_cluster.cluster[0]" +done + +# Get list of worker pools to be moved +old_worker_pool="$(terraform state list | grep "ibm_container_vpc_worker_pool.pool")" + +# Run only if additional worker pools are present other than the default pool +if ((${#old_worker_pool[@]})); then + for pool in "${old_worker_pool[@]}"; do + for cluster in "${old_clusters[@]}"; do + cluster_name="$(echo "$cluster" | awk -F'["]' '{for(i=2;i<=NF;i+=2) print $i}')" + if [[ $pool == *"$cluster_name"* ]]; then + pre="$(echo "$cluster" | sed -E 's/(([^.]*\.){3}[^.]*)\..*/\1/')" + post="$(echo "$cluster" | sed -E 's/\..*(([^.]*\.){1}[^.]*)/\1/')" + old_pool_name="$(echo "$pool" | awk -F'["]' '{for(i=2;i<=NF;i+=2) print $i}'))" + pool_name=${old_pool_name//$cluster_name-/} + terraform state mv "$pool" "$pre"."$post".ibm_container_vpc_worker_pool.pool[\""$pool_name"\"] + fi + done + done +fi diff --git a/migration/schematics_migration_v4.4.0_to_v5.0.0.sh b/migration/schematics_migration_v4.4.0_to_v5.0.0.sh new file mode 100644 index 000000000..732cacf84 --- /dev/null +++ b/migration/schematics_migration_v4.4.0_to_v5.0.0.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +### NOTE: Make sure the clusters are openshift clusters as the SLZ has been updated to only support ROKS cluster creation using module. If there are IKS cluster being tracked in the state file please use the terraform cli to manually move the ROKS clusters. Please refer to the https://github.com/terraform-ibm-modules/terraform-ibm-landing-zone/blob/extend-output/migration/update-version.md on steps to run terraform cli commands. + +if [ -z "$1" ]; then + echo "ERROR::Please ensure you provide correct Workspace ID as the parameter and try again" +else + WS_ID=$1 + template_id="$(ibmcloud schematics workspace get --id "$WS_ID" -o json | jq -r .template_data[0].id)" + old_clusters="$(ibmcloud schematics state pull --id "$WS_ID" --template "$template_id" | jq -r '.outputs.cluster_names.value' | tr -d '[]," ')" + + for cluster in "${old_clusters[@]}"; do + ibmcloud schematics workspace state mv --id "$WS_ID" --source "module.landing_zone.ibm_container_vpc_cluster.cluster[\"$cluster\"]" --destination "module.landing_zone.module.cluster[\"$cluster\"].ibm_container_vpc_cluster.cluster[0]" + sleep 60 + while true; do + status=$(ibmcloud schematics workspace get --id "$WS_ID" -o json | jq -r .status) + echo "$status" + if [[ "$status" == "ACTIVE" ]]; then + echo "Changes done to $cluster" + break + elif [[ "$status" == "FAILED" ]]; then + echo "ERROR::Unfortunately, the Schematics workspace is in a FAILED state. 
Please review the workspace and try running the following command manually:" + echo "ibmcloud schematics workspace state mv --id ""$WS_ID"" --source 'module.landing_zone.ibm_container_vpc_cluster.cluster[\"$cluster\"]' --destination 'module.landing_zone.module.cluster[\"$cluster\"].ibm_container_vpc_cluster.cluster[0]'" + break + fi + sleep 10 + status="" + done + done + + mapfile -t old_worker_pools < <(ibmcloud schematics state pull --id "$WS_ID" --template "$template_id" | jq -r '.resources[] | select(.instances[].attributes.worker_pool_name != null) | .instances[].index_key' | sort -u) + + if ((${#old_worker_pools[@]})); then + for pool in "${old_worker_pools[@]}"; do + for cluster in "${old_clusters[@]}"; do + if [[ $pool == *"$cluster"* ]]; then + pool_name=${pool//$cluster-/} + ibmcloud schematics workspace state mv --id "$WS_ID" --source "module.landing_zone.ibm_container_vpc_worker_pool.pool[\"$pool\"]" --destination "module.landing_zone.module.cluster[\"$cluster\"].ibm_container_vpc_worker_pool.pool[\"$pool_name\"]" + sleep 60 + while true; do + status=$(ibmcloud schematics workspace get --id "$WS_ID" -o json | jq -r .status) + echo "$status" + if [[ "$status" == "ACTIVE" ]]; then + echo "Changes done to $pool" + break + elif [[ "$status" == "FAILED" ]]; then + echo "ERROR::Unfortunately, the Schematics workspace is in a FAILED state. Please review the workspace and try running the following command manually:" + break + fi + sleep 10 + status="" + done + fi + done + done + fi +fi diff --git a/migration/update-version.md b/migration/update-version.md new file mode 100644 index 000000000..4d6814ae0 --- /dev/null +++ b/migration/update-version.md @@ -0,0 +1,108 @@ +# Updating to another release of the Red Hat OpenShift pattern + +With release XXX, the Red Hat OpenShift cluster creation moved from a resource block to a separate module. You can update to the new release of the landing zone without destroying and re-creating your Red Hat OpenShift cluster. + +Choose the method that matches how you want to update your module: + +- [Modify the Terraform code](#update-by-modifying-the-terraform-code) +- [Use the Terraform CLI](#update-by-using-the-terraform-cli) +- [Use Schematics](#update-by-using-schematics) + +## Update by modifying the Terraform code + +To update your Terraform code directly, follow these steps. + +1. Edit the `moved_config.tf` file by adding the following lines: + + ```tf + moved { + from = ibm_container_vpc_cluster.cluster["<workload_cluster_name>"] + to = module.cluster["<workload_cluster_name>"].ibm_container_vpc_cluster.cluster[0] + } + moved { + from = ibm_container_vpc_cluster.cluster["<management_cluster_name>"] + to = module.cluster["<management_cluster_name>"].ibm_container_vpc_cluster.cluster[0] + } + ``` + + Where `<workload_cluster_name>` and `<management_cluster_name>` are your workload cluster and management cluster names. + +1. Run `terraform plan` and check the output. When you're satisfied with the output, run `terraform apply` to update to the new release. + +## Update by using the Terraform CLI + +To update your code with the Terraform CLI, follow these steps. + +1. Run this command to retrieve the cluster names from the state file. + + ```tf + terraform state list | grep "ibm_container_vpc_cluster.cluster" + ``` + +1. Copy the cluster name from the list. For example, `debug-ocp-management-cluster`, `debug-ocp-workload-cluster` +1. Run `terraform init` to initialize the newly added resources. +1. Run the following command for each element in the list: + + ```tf + terraform state mv 'module.landing_zone.ibm_container_vpc_cluster.cluster["<cluster_name>"]' 'module.landing_zone.module.cluster["<cluster_name>"].ibm_container_vpc_cluster.cluster[0]' + ``` + +1. Run `terraform apply` to apply the changes to the infrastructure without re-creating the cluster. +1. (Optional) If you have worker pools other than the default pool, also move them to prevent them from being destroyed and re-created. As in the previous steps, run the following commands: + + 1. Run the command to get a list of all the worker pools. + + ```tf + terraform state list | grep "ibm_container_vpc_worker_pool.pool" + ``` + + 1. Separate the cluster name and the worker pool name. For example, you might get something similar to the following output: + + ```tf + module.landing_zone.ibm_container_vpc_worker_pool.pool["debug-ocp-workload-cluster-logging-worker-pool"] + ``` + + In this case, the cluster_name is `debug-ocp-workload-cluster` and the worker_pool_name is `logging-worker-pool`. + + 1. For each element in the list, run the following command: + + ```tf + terraform state mv 'module.landing_zone.ibm_container_vpc_worker_pool.pool["<cluster_name>-<worker_pool_name>"]' 'module.landing_zone.module.cluster["<cluster_name>"].ibm_container_vpc_worker_pool.pool["<worker_pool_name>"]' + ``` + +## Update by using Schematics + +If you deployed the landing zone by using a Schematics workspace, use the following commands to update to the release. + +1. Get the cluster name from the template: + + 1. Find the template ID: + + ```sh + ibmcloud schematics workspace get --id WORKSPACE_ID + ``` + + The output includes the template ID in the `Template Variables for` field. + + 1. Run the following command with the template ID: + + ```sh + ibmcloud schematics state pull --id WORKSPACE_ID --template TEMPLATE_ID | jq -r '.outputs.cluster_names.value' + ``` + +1. Copy the cluster name from the list, as in the following example: + + ```tf + [ + "debug-ocp-management-cluster", + "debug-ocp-workload-cluster" + ] + ``` + +1. For each element in the list, run the following command: + + ```sh + ibmcloud schematics workspace state mv --id WORKSPACE_ID --source 'module.landing_zone.ibm_container_vpc_cluster.cluster["<cluster_name>"]' --destination 'module.landing_zone.module.cluster["<cluster_name>"].ibm_container_vpc_cluster.cluster[0]' + ``` + +1. Update the offering version. diff --git a/outputs.tf b/outputs.tf index c4031cb77..d4f2c0d33 100644 --- a/outputs.tf +++ b/outputs.tf @@ -68,10 +68,16 @@ output "bastion_host_names" { output "cluster_names" { description = "List of created cluster names" - value = [ - for cluster in ibm_container_vpc_cluster.cluster : - cluster.name - ] + value = flatten([ + [ + for cluster in ibm_container_vpc_cluster.cluster : + cluster.name + ], + [ + for cluster in module.cluster : + cluster.cluster_name + ] + ]) } output "workload_cluster_id" { @@ -86,21 +92,7 @@ output "management_cluster_id" { output "cluster_data" { description = "List of cluster data" - value = { - for cluster in ibm_container_vpc_cluster.cluster : - cluster.name => { - crn = cluster.crn - id = cluster.id - resource_group_name = cluster.resource_group_name - resource_group_id = cluster.resource_group_id - vpc_id = cluster.vpc_id - region = var.region - private_service_endpoint_url = cluster.private_service_endpoint_url - public_service_endpoint_url = (cluster.public_service_endpoint_url != "" && cluster.public_service_endpoint_url != null) ? cluster.public_service_endpoint_url : null - ingress_hostname = cluster.ingress_hostname - cluster_console_url = (cluster.public_service_endpoint_url != "" && cluster.public_service_endpoint_url != null) ?
"https://console-openshift-console.${cluster.ingress_hostname}" : null - } - } + value = local.cluster_data } ############################################################################## @@ -229,7 +221,7 @@ output "vpc_resource_list" { name = vpc.vpc_data.name resource_group_id = vpc.vpc_data.resource_group region = var.region - clusters = flatten([for cluster in ibm_container_vpc_cluster.cluster : + clusters = flatten([for cluster in local.cluster_data : cluster.id if cluster.vpc_id == vpc.vpc_data.id ]) vsi = distinct(flatten([ diff --git a/patterns/mixed/config.tf b/patterns/mixed/config.tf index 9601b39a1..1a22a48f8 100644 --- a/patterns/mixed/config.tf +++ b/patterns/mixed/config.tf @@ -111,15 +111,21 @@ locals { crk_name = "${var.prefix}-roks-key" private_endpoint = true } - workers_per_subnet = var.workers_per_zone - machine_type = var.flavor - kube_type = "openshift" - kube_version = var.kube_version - resource_group = "${var.prefix}-${var.vpcs[1]}-rg" - cos_name = "cos" - entitlement = var.entitlement - secondary_storage = var.secondary_storage - boot_volume_crk_name = "${var.prefix}-roks-key" + workers_per_subnet = var.workers_per_zone + minimum_size = var.minimum_size + maximum_size = var.maximum_size + enable_autoscaling = var.enable_autoscaling + machine_type = var.flavor + kube_type = "openshift" + kube_version = var.kube_version + resource_group = "${var.prefix}-${var.vpcs[1]}-rg" + cos_name = "cos" + entitlement = var.entitlement + secondary_storage = var.secondary_storage + disable_public_endpoint = var.disable_public_endpoint + use_private_endpoint = var.use_private_endpoint + verify_worker_network_readiness = var.verify_worker_network_readiness + boot_volume_crk_name = "${var.prefix}-roks-key" # By default, create dedicated pool for logging worker_pools = [ { @@ -131,6 +137,9 @@ locals { ] entitlement = var.entitlement workers_per_subnet = var.workers_per_zone + minimum_size = var.minimum_size + maximum_size = var.maximum_size + enable_autoscaling = var.enable_autoscaling flavor = var.flavor secondary_storage = var.secondary_storage boot_volume_crk_name = "${var.prefix}-roks-key" diff --git a/patterns/mixed/override.json b/patterns/mixed/override.json index 7139628ce..ed9a03fcd 100644 --- a/patterns/mixed/override.json +++ b/patterns/mixed/override.json @@ -14,6 +14,9 @@ "machine_type": "bx2.16x64", "name": "workload-cluster", "resource_group": "slz-workload-rg", + "disable_public_endpoint": true, + "use_private_endpoint": false, + "verify_worker_network_readiness": false, "kms_config": { "crk_name": "slz-roks-key", "private_endpoint": true @@ -35,10 +38,16 @@ "vsi-zone-3" ], "vpc_name": "workload", - "workers_per_subnet": 2 + "workers_per_subnet": 2, + "minimum_size": 2, + "maximum_size": 3, + "enable_autoscaling": true } ], - "workers_per_subnet": 2 + "workers_per_subnet": 2, + "minimum_size": 2, + "maximum_size": 3, + "enable_autoscaling": true } ], "cos": [ diff --git a/patterns/mixed/variables.tf b/patterns/mixed/variables.tf index 619536e72..19eb9afaa 100644 --- a/patterns/mixed/variables.tf +++ b/patterns/mixed/variables.tf @@ -211,6 +211,42 @@ variable "wait_till" { } } +variable "disable_public_endpoint" { + type = bool + description = "Flag indicating that the public endpoint should be disabled" + default = true +} + +variable "minimum_size" { + type = number + description = "Minimum number of worker nodes per zone that the cluster autoscaler can scale down the worker pool to." 
+ default = 1 +} + +variable "maximum_size" { + type = number + description = "Maximum number of worker nodes per zone that the cluster autoscaler can scale up the worker pool to." + default = 3 +} + +variable "enable_autoscaling" { + type = bool + description = "Flag to set cluster autoscaler to manage scaling for the worker pool." + default = false +} + +variable "use_private_endpoint" { + type = bool + description = "Set this to true to force all cluster related api calls to use the IBM Cloud private endpoints." + default = false +} + +variable "verify_worker_network_readiness" { + type = bool + description = "By setting this to true, a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false." + default = false +} + ############################################################################## diff --git a/patterns/roks-quickstart/main.tf b/patterns/roks-quickstart/main.tf index 5b314b977..9d73f7fca 100644 --- a/patterns/roks-quickstart/main.tf +++ b/patterns/roks-quickstart/main.tf @@ -53,7 +53,8 @@ locals { "worker_pools": [], "workers_per_subnet": 1, "entitlement": ${local.entitlement_val}, - "disable_public_endpoint": false + "disable_public_endpoint": false, + "use_private_endpoint": false } ], "cos": [ diff --git a/patterns/roks/main.tf b/patterns/roks/main.tf index 0399f6f2d..10b4c04a8 100644 --- a/patterns/roks/main.tf +++ b/patterns/roks/main.tf @@ -102,6 +102,12 @@ module "roks_landing_zone" { tmos_admin_password = var.tmos_admin_password license_type = var.license_type teleport_management_zones = var.teleport_management_zones + disable_public_endpoint = var.disable_public_endpoint + use_private_endpoint = var.use_private_endpoint + minimum_size = var.minimum_size + maximum_size = var.maximum_size + enable_autoscaling = var.enable_autoscaling + verify_worker_network_readiness = var.verify_worker_network_readiness IC_SCHEMATICS_WORKSPACE_ID = var.IC_SCHEMATICS_WORKSPACE_ID kms_wait_for_apply = var.kms_wait_for_apply } diff --git a/patterns/roks/module/config.tf b/patterns/roks/module/config.tf index 897a29d5a..acef9c307 100644 --- a/patterns/roks/module/config.tf +++ b/patterns/roks/module/config.tf @@ -92,10 +92,13 @@ locals { machine_type = var.flavor kube_type = "openshift" kube_version = var.kube_version + disable_public_endpoint = var.disable_public_endpoint + use_private_endpoint = var.use_private_endpoint + verify_worker_network_readiness = var.verify_worker_network_readiness resource_group = "${var.prefix}-${network}-rg" + secondary_storage = var.secondary_storage cos_name = "cos" entitlement = var.entitlement - secondary_storage = var.secondary_storage addons = var.cluster_addons manage_all_addons = var.manage_all_cluster_addons boot_volume_crk_name = "${var.prefix}-roks-key" @@ -103,7 +106,9 @@ locals { cluster_force_delete_storage = var.cluster_force_delete_storage operating_system = var.operating_system kms_wait_for_apply = var.kms_wait_for_apply - # By default, create dedicated pool for logging + minimum_size = var.minimum_size + maximum_size = var.maximum_size + enable_autoscaling = var.enable_autoscaling worker_pools = [ # { # name = "logging-worker-pool" diff --git a/patterns/roks/module/variables.tf b/patterns/roks/module/variables.tf index 89cb73e93..3d53996b9 100644 --- a/patterns/roks/module/variables.tf +++ b/patterns/roks/module/variables.tf @@ -1,7 +1,6 @@ 
############################################################################## # Account Variables ############################################################################## - variable "prefix" { description = "A unique identifier for resources that is prepended to resources that are provisioned. Must begin with a lowercase letter and end with a lowercase letter or number. Must be 13 or fewer characters." type = string @@ -223,6 +222,42 @@ variable "entitlement" { default = null } +variable "disable_public_endpoint" { + type = bool + description = "Flag indicating that the public endpoint should be disabled" + default = true +} + +variable "minimum_size" { + type = number + description = "Minimum number of worker nodes per zone that the cluster autoscaler can scale down the worker pool to." + default = 1 +} + +variable "maximum_size" { + type = number + description = "Maximum number of worker nodes per zone that the cluster autoscaler can scale up the worker pool to." + default = 3 +} + +variable "enable_autoscaling" { + type = bool + description = "Flag to set cluster autoscaler to manage scaling for the worker pool." + default = true +} + +variable "use_private_endpoint" { + type = bool + description = "Set this to true to force all cluster related api calls to use the IBM Cloud private endpoints." + default = false +} + +variable "verify_worker_network_readiness" { + type = bool + description = "By setting this to true, a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false." + default = false +} + variable "secondary_storage" { description = "Optionally specify a secondary storage option to attach to all cluster worker nodes. This value is immutable and can't be changed after provisioning. Use the IBM Cloud CLI command ibmcloud ks flavors to find valid options, e.g ibmcloud ks flavor get --flavor bx2.16x64 --provider vpc-gen2 --zone us-south-1." 
type = string diff --git a/patterns/roks/override.json b/patterns/roks/override.json index 52eedfb53..dd9f4df90 100644 --- a/patterns/roks/override.json +++ b/patterns/roks/override.json @@ -14,6 +14,9 @@ "machine_type": "bx2.16x64", "name": "management-cluster", "resource_group": "slz-management-rg", + "disable_public_endpoint": true, + "use_private_endpoint": false, + "verify_worker_network_readiness": false, "disable_outbound_traffic_protection": false, "cluster_force_delete_storage": false, "operating_system": null, @@ -39,10 +42,16 @@ "vsi-zone-3" ], "vpc_name": "management", - "workers_per_subnet": 2 + "workers_per_subnet": 2, + "minimum_size": 2, + "maximum_size": 3, + "enable_autoscaling": true } ], - "workers_per_subnet": 2 + "workers_per_subnet": 2, + "minimum_size": 2, + "maximum_size": 3, + "enable_autoscaling": true }, { "cos_name": "cos", @@ -52,6 +61,9 @@ "machine_type": "bx2.16x64", "name": "workload-cluster", "resource_group": "slz-workload-rg", + "disable_public_endpoint": true, + "use_private_endpoint": false, + "verify_worker_network_readiness": false, "disable_outbound_traffic_protection": false, "cluster_force_delete_storage": false, "kms_wait_for_apply": true, diff --git a/patterns/roks/scripts/deploy-pre-ansible-playbook.yaml b/patterns/roks/scripts/deploy-pre-ansible-playbook.yaml new file mode 100644 index 000000000..334f5c802 --- /dev/null +++ b/patterns/roks/scripts/deploy-pre-ansible-playbook.yaml @@ -0,0 +1,86 @@ +- name: Deploy pre playbook + hosts: localhost + tasks: + # - name: Login to ibmcloud + # ansible.builtin.shell: + # cmd: ibmcloud login --apikey "{{ ibmcloud_api_key }}" -q -r {{ workspace_id.split(".")[0] }} + - name: Get Template ID + ansible.builtin.shell: + cmd: ibmcloud schematics workspace get --id "{{ workspace_id }}" -o json | jq -r .template_data[0].id + register: template_id + - name: Get Cluster names + ansible.builtin.shell: + cmd: ibmcloud schematics state pull --id "{{ workspace_id }}" --template "{{ template_id.stdout }}" | jq -r '.outputs.cluster_names.value' | tr -d '[]," ' + register: cluster_names + - name: Remove empty entries from the Cluster names list + ansible.builtin.set_fact: + cleaned_list: "{{ cluster_names.stdout_lines | reject('equalto', '') | list }}" + - name: Print the Cluster names + ansible.builtin.debug: + var: cleaned_list + - name: Check for old version of landing zone + ansible.builtin.shell: | + cluster="$(ibmcloud schematics state pull --id "{{ workspace_id }}" --template "{{ template_id.stdout }}" | jq -r '.resources[] | select((.module == "module.roks_landing_zone.module.landing_zone") and (.name == "cluster"))')" + if [ -n "$cluster" ]; then + echo "true" + else + echo "false" + fi + register: version_check + - name: Update state file + ansible.builtin.shell: | + ibmcloud schematics workspace state mv --id "{{ workspace_id }}" --source "module.roks_landing_zone.module.landing_zone.ibm_container_vpc_cluster.cluster[\"{{ item }}\"]" --destination "module.roks_landing_zone.module.landing_zone.module.cluster[\"{{ item }}\"].ibm_container_vpc_cluster.cluster[0]" + sleep 60 + while true; do + status=$(ibmcloud schematics workspace get --id "{{ workspace_id }}" -o json | jq -r .status) + echo "$status" + if [[ "$status" == "ACTIVE" ]]; then + echo "Changes done to {{ item }}" + break + elif [[ "$status" == "FAILED" ]]; then + echo "ERROR:: The Schematics workspace is in a FAILED state. 
Please review the workspace and try running the following command manually:" + echo "ibmcloud schematics workspace state mv --id "{{ workspace_id }}" --source 'module.roks_landing_zone.module.landing_zone.ibm_container_vpc_cluster.cluster[\"{{ item }}\"]' --destination 'module.roks_landing_zone.module.landing_zone.module.cluster[\"{{ item }}\"].ibm_container_vpc_cluster.cluster[0]'" + break + fi + sleep 10 + status="" + done + when: version_check.stdout|bool == true + with_items: "{{ cleaned_list }}" + - name: Check for worker pools + ansible.builtin.shell: | + old_worker_pools="$(ibmcloud schematics state pull --id "{{ workspace_id }}" --template "{{ template_id.stdout }}" | jq -r '.resources[] | select(.instances[].attributes.worker_pool_name != null) | .instances[].index_key' | sort -u)" + if [ -n "$old_worker_pools" ]; then + echo "true" + else + echo "false" + fi + register: worker_pool_check + - name: Update state file for worker pools + ansible.builtin.shell: | + old_worker_pools=($(ibmcloud schematics state pull --id "{{ workspace_id }}" --template "{{ template_id.stdout }}" | jq -r '.resources[] | select(.instances[].attributes.worker_pool_name != null) | .instances[].index_key' | sort -u)) + for pool in "${old_worker_pools[@]}"; do + if [[ $pool == *"{{ item }}"* ]]; then + pool_name=${pool//{{ item }}-/} + ibmcloud schematics workspace state mv --id "{{ workspace_id }}" --source "module.roks_landing_zone.module.landing_zone.ibm_container_vpc_worker_pool.pool[\"$pool\"]" --destination "module.roks_landing_zone.module.landing_zone.module.cluster[\"{{ item }}\"].ibm_container_vpc_worker_pool.pool[\"$pool_name\"]" + sleep 60 + while true; do + status=$(ibmcloud schematics workspace get --id "{{ workspace_id }}" -o json | jq -r .status) + echo "$status" + if [[ "$status" == "ACTIVE" ]]; then + echo "Changes done to {{ item }}" + break + elif [[ "$status" == "FAILED" ]]; then + echo "ERROR:: The Schematics workspace is in a FAILED state. Please review the workspace and try running the following command manually:" + echo "ibmcloud schematics workspace state mv --id "{{ workspace_id }}" --source 'module.roks_landing_zone.module.landing_zone.ibm_container_vpc_worker_pool.pool[\"$pool\"]' --destination 'module.roks_landing_zone.module.landing_zone.module.cluster[\"{{ item }}\"].ibm_container_vpc_worker_pool.pool[\"$pool_name\"]'" + break + fi + sleep 10 + status="" + done + fi + done + with_items: "{{ cleaned_list }}" + when: + - worker_pool_check.stdout|bool == true + - version_check.stdout|bool == true diff --git a/patterns/roks/variables.tf b/patterns/roks/variables.tf index ed38efa12..5e072aadf 100644 --- a/patterns/roks/variables.tf +++ b/patterns/roks/variables.tf @@ -187,15 +187,16 @@ variable "cluster_zones" { variable "kube_version" { - description = "The version of the OpenShift cluster that should be provisioned. Current supported values are '4.15_openshift', '4.14_openshift', '4.13_openshift', or '4.12_openshift'. NOTE: This is only used during initial cluster provisioning, but ignored for future updates. Cluster version updates should be done outside of terraform to prevent possible destructive changes." + description = "The version of the OpenShift cluster that should be provisioned. Current supported values are '4.15', '4.14', '4.13', or '4.12'. NOTE: This is only used during initial cluster provisioning, but ignored for future updates. Cluster version updates should be done outside of terraform to prevent possible destructive changes." 
type = string - default = "4.15_openshift" + default = "4.15" validation { condition = anytrue([ - var.kube_version == "4.15_openshift", - var.kube_version == "4.14_openshift", - var.kube_version == "4.13_openshift", - var.kube_version == "4.12_openshift", + var.kube_version == null, + var.kube_version == "4.15", + var.kube_version == "4.14", + var.kube_version == "4.13", + var.kube_version == "4.12", ]) - error_message = "The kube_version value can currently only be '4.15_openshift', '4.14_openshift', '4.13_openshift', or '4.12_openshift'" + error_message = "The kube_version value can currently only be '4.15', '4.14', '4.13', or '4.12'." } } @@ -208,7 +209,7 @@ variable "flavor" { variable "workers_per_zone" { description = "Number of workers in each zone of the cluster. OpenShift requires at least 2 workers." type = number - default = 1 + default = 3 } variable "wait_till" { @@ -238,6 +239,42 @@ variable "entitlement" { default = null } +variable "disable_public_endpoint" { + type = bool + description = "Flag indicating whether the public endpoint should be disabled." + default = true +} + +variable "use_private_endpoint" { + type = bool + description = "Set this to true to force all cluster-related API calls to use the IBM Cloud private endpoints." + default = false +} + +variable "minimum_size" { + type = number + description = "Minimum number of worker nodes per zone that the cluster autoscaler can scale down the worker pool to." + default = 1 +} + +variable "maximum_size" { + type = number + description = "Maximum number of worker nodes per zone that the cluster autoscaler can scale up the worker pool to." + default = 3 +} + +variable "enable_autoscaling" { + type = bool + description = "Flag to enable the cluster autoscaler to manage scaling for the worker pool." + default = false +} + +variable "verify_worker_network_readiness" { + type = bool + description = "Set this to true to run a script that uses kubectl to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, set this to false." + default = false +} + variable "cluster_addons" { type = object({ debug-tool = optional(string) diff --git a/patterns/vpc/module/variables.tf b/patterns/vpc/module/variables.tf index 0daa4abfb..7df9021ef 100644 --- a/patterns/vpc/module/variables.tf +++ b/patterns/vpc/module/variables.tf @@ -1,7 +1,6 @@ ############################################################################## # Account Variables ############################################################################## - variable "prefix" { description = "A unique identifier for resources that is prepended to resources that are provisioned. Must begin with a lowercase letter and end with a lowercase letter or number. Must be 16 or fewer characters." type = string diff --git a/patterns/vsi/module/variables.tf b/patterns/vsi/module/variables.tf index 18976c2d4..9e1062028 100644 --- a/patterns/vsi/module/variables.tf +++ b/patterns/vsi/module/variables.tf @@ -1,7 +1,6 @@ ############################################################################## # Account Variables ############################################################################## - variable "prefix" { description = "A unique identifier for resources that is prepended to resources that are provisioned. Must begin with a lowercase letter and end with a lowercase letter or number. Must be 16 or fewer characters." 
type = string diff --git a/tests/pr_test.go b/tests/pr_test.go index 4fea34308..68871dff5 100644 --- a/tests/pr_test.go +++ b/tests/pr_test.go @@ -24,6 +24,7 @@ import ( ) const quickStartPatternTerraformDir = "patterns/vsi-quickstart" +const iksExampleTerraformDir = "examples/one-vsi-one-iks" const roksQuickstartPatternTerraformDir = "patterns/roks-quickstart" const roksPatternTerraformDir = "patterns/roks" const vsiPatternTerraformDir = "patterns/vsi" @@ -332,6 +333,37 @@ func TestRunUpgradeVpcPattern(t *testing.T) { } } +func setupOptionsIksExample(t *testing.T, prefix string) *testhelper.TestOptions { + + sshPublicKey := sshPublicKey(t) + + options := testhelper.TestOptionsDefault(&testhelper.TestOptions{ + Testing: t, + TerraformDir: iksExampleTerraformDir, + Prefix: prefix, + ResourceGroup: resourceGroup, + CloudInfoService: sharedInfoSvc, + }) + + options.TerraformVars = map[string]interface{}{ + "ssh_key": sshPublicKey, + "prefix": options.Prefix, + "region": options.Region, + } + + return options +} + +func TestRunIksExample(t *testing.T) { + t.Parallel() + + options := setupOptionsIksExample(t, "slz-iks") + + output, err := options.RunTestConsistency() + assert.Nil(t, err, "This should not have errored") + assert.NotNil(t, output, "Expected some output") +} + func TestRunOverride(t *testing.T) { t.Parallel() diff --git a/tests/resources/slz-vpc/main.tf b/tests/resources/slz-vpc/main.tf index f8f631921..722e354ec 100644 --- a/tests/resources/slz-vpc/main.tf +++ b/tests/resources/slz-vpc/main.tf @@ -9,4 +9,5 @@ module "landing_zone" { tags = var.resource_tags enable_transit_gateway = false add_atracker_route = false + ibmcloud_api_key = var.ibmcloud_api_key } diff --git a/variables.tf b/variables.tf index 0924c065a..4e9b6d8d4 100644 --- a/variables.tf +++ b/variables.tf @@ -854,6 +854,11 @@ variable "clusters" { disable_public_endpoint = optional(bool, true) # disable cluster public, leaving only private endpoint disable_outbound_traffic_protection = optional(bool, false) # public outbound access from the cluster workers cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion + verify_worker_network_readiness = optional(bool) # Flag to run a script that uses kubectl to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, set this to false. + use_private_endpoint = optional(bool, false) # Flag to force all cluster-related API calls to use the IBM Cloud private endpoints. + minimum_size = optional(number) # Minimum number of worker nodes per zone that the cluster autoscaler can scale down the worker pool to. + maximum_size = optional(number) # Maximum number of worker nodes per zone that the cluster autoscaler can scale up the worker pool to. + enable_autoscaling = optional(bool, false) # Flag to enable the cluster autoscaler to manage scaling for the worker pool. operating_system = optional(string, null) #The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available . 
kms_wait_for_apply = optional(bool, true) # make terraform wait until KMS is applied to master and it is ready and deployed addons = optional(object({ # Map of OCP cluster add-on versions to install @@ -877,15 +882,18 @@ variable "clusters" { worker_pools = optional( list( object({ - name = string # Worker pool name - vpc_name = string # VPC name - workers_per_subnet = number # Worker nodes per subnet - flavor = string # Worker node flavor - subnet_names = list(string) # List of vpc subnets for worker pool - entitlement = optional(string) # entitlement option for openshift - secondary_storage = optional(string) # Secondary storage type - boot_volume_crk_name = optional(string) # Boot volume encryption key name + name = string # Worker pool name + vpc_name = string # VPC name + workers_per_subnet = number # Worker nodes per subnet + flavor = string # Worker node flavor + subnet_names = list(string) # List of vpc subnets for worker pool + entitlement = optional(string) # entitlement option for openshift + secondary_storage = optional(string) # Secondary storage type + boot_volume_crk_name = optional(string) # Boot volume encryption key name operating_system = optional(string) # The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available . + minimum_size = optional(number) # Minimum number of worker nodes per zone that the cluster autoscaler can scale down the worker pool to. + maximum_size = optional(number) # Maximum number of worker nodes per zone that the cluster autoscaler can scale up the worker pool to. + enable_autoscaling = optional(bool, false) # Flag to enable the cluster autoscaler to manage scaling for the worker pool. }) ) )
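
Taken together, the additions above surface the autoscaling and endpoint controls end to end, from the pattern variables down to the `clusters` schema. A minimal sketch of a `clusters` entry that opts in to them, with values borrowed loosely from `override.json` above (the name, VPC, and subnet list are illustrative, not prescribed by this patch):

```hcl
clusters = [
  {
    # Illustrative names; substitute your own VPC, subnets, and resource group.
    name                            = "workload-cluster"
    vpc_name                        = "workload"
    subnet_names                    = ["vsi-zone-1"]
    workers_per_subnet              = 2
    machine_type                    = "bx2.16x64"
    kube_type                       = "openshift"
    resource_group                  = "slz-workload-rg"
    cos_name                        = "cos"
    disable_public_endpoint         = true  # keep only the private cluster endpoint
    use_private_endpoint            = false # provider API calls keep using public IBM Cloud endpoints
    verify_worker_network_readiness = false # skip the kubectl worker-to-master readiness check
    enable_autoscaling              = true  # let the cluster autoscaler manage the default pool
    minimum_size                    = 2     # per-zone floor for scale-down
    maximum_size                    = 3     # per-zone ceiling for scale-up
  }
]
```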
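The same three autoscaling fields are also accepted on each `worker_pools` entry, so an individual pool can scale independently of the cluster-level settings. A sketch under the same assumptions (the pool name echoes the commented-out `logging-worker-pool` in `config.tf` above; flavor and subnets mirror the `override.json` values):

```hcl
worker_pools = [
  {
    # Hypothetical pool for illustration only.
    name               = "logging-worker-pool"
    vpc_name           = "management"
    subnet_names       = ["vsi-zone-3"]
    workers_per_subnet = 2
    flavor             = "bx2.16x64"
    enable_autoscaling = true # autoscaler manages only this pool
    minimum_size       = 2    # per-zone floor for scale-down
    maximum_size       = 3    # per-zone ceiling for scale-up
  }
]
```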