diff --git a/.github/workflows/linter-analysis.yaml b/.github/workflows/linter-analysis.yaml
index 3620bd64..b4454d04 100644
--- a/.github/workflows/linter-analysis.yaml
+++ b/.github/workflows/linter-analysis.yaml
@@ -2,7 +2,7 @@ name: Linter Analysis
on:
push:
- branches: ['*'] # '*' will cause the workflow to run on all commits to all branches.
+ branches: [ '**' ] # '**' will cause the workflow to run on all commits to all branches, including those with path separators
jobs:
# Hadolint: Job-1
@@ -54,8 +54,15 @@ jobs:
tflint_version: latest
github_token: ${{ secrets.LINTER_TEST_TOKEN }}
+ # Necessary so we can recursively tflint our modules folder
+ # with the plugin, not needed for regular project use.
+ - name: Initializing modules
+ run: |
+ terraform -chdir=modules/aws_autoscaling init
+ terraform -chdir=modules/aws_ebs_csi init
+
- name: Initializing TFLint
- run: TFLINT_LOG=info tflint --init -c .tflint.hcl
+ run: TFLINT_LOG=info tflint --init -c "$(pwd)/linting-configs/.tflint.hcl"
- name: Run TFLint Action
- run: TFLINT_LOG=info tflint -c .tflint.hcl
+ run: TFLINT_LOG=info tflint -c "$(pwd)/linting-configs/.tflint.hcl" --recursive
diff --git a/Dockerfile b/Dockerfile
index 5d4d4e9a..53437511 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,9 +1,9 @@
-ARG TERRAFORM_VERSION=1.4.5
-ARG AWS_CLI_VERSION=2.11.21
+ARG TERRAFORM_VERSION=1.6.3
+ARG AWS_CLI_VERSION=2.13.33
FROM hashicorp/terraform:$TERRAFORM_VERSION as terraform
FROM amazon/aws-cli:$AWS_CLI_VERSION
-ARG KUBECTL_VERSION=1.26.7
+ARG KUBECTL_VERSION=1.26.10
WORKDIR /viya4-iac-aws
diff --git a/README.md b/README.md
index d011863c..982c78b3 100644
--- a/README.md
+++ b/README.md
@@ -47,10 +47,10 @@ The following are also required:
#### Terraform Requirements:
-- [Terraform](https://www.terraform.io/downloads.html) v1.4.5
-- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - v1.26.7
+- [Terraform](https://www.terraform.io/downloads.html) v1.6.3
+- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - v1.26.10
- [jq](https://stedolan.github.io/jq/) v1.6
-- [AWS CLI](https://aws.amazon.com/cli) (optional; useful as an alternative to the AWS Web Console) v2.7.22
+- [AWS CLI](https://aws.amazon.com/cli) (optional; useful as an alternative to the AWS Web Console) v2.13.33
#### Docker Requirements:
diff --git a/container-structure-test.yaml b/container-structure-test.yaml
index 0635b134..063aaaa0 100644
--- a/container-structure-test.yaml
+++ b/container-structure-test.yaml
@@ -17,14 +17,14 @@ commandTests:
- name: "terraform version"
command: "terraform"
args: ["--version"]
- expectedOutput: ["Terraform v1.4.5"]
+ expectedOutput: ["Terraform v1.6.3"]
- name: "aws-cli version"
command: "sh"
args:
- -c
- |
aws --version
- expectedOutput: ["aws-cli/2.11.21"]
+ expectedOutput: ["aws-cli/2.13.33"]
metadataTest:
workdir: "/viya4-iac-aws"
diff --git a/docs/CONFIG-VARS.md b/docs/CONFIG-VARS.md
index 0d7d8982..0b2a3041 100644
--- a/docs/CONFIG-VARS.md
+++ b/docs/CONFIG-VARS.md
@@ -11,7 +11,10 @@ Supported configuration variables are listed in the tables below. All variables
- [Using Static Credentials](#using-static-credentials)
- [Using AWS Profile](#using-aws-profile)
- [Admin Access](#admin-access)
+ - [Public Access CIDRs](#public-access-cidrs)
+ - [Private Access CIDRs](#private-access-cidrs)
- [Networking](#networking)
+ - [Subnet requirements](#subnet-requirements)
- [Use Existing](#use-existing)
- [IAM](#iam)
- [General](#general)
@@ -41,7 +44,7 @@ Terraform input variables can be set in the following ways:
### AWS Authentication
-The Terraform process manages AWS resources on your behalf. In order to do so, it needs the credentials for an AWS identity with the required permissons.
+The Terraform process manages AWS resources on your behalf. In order to do so, it needs the credentials for an AWS identity with the required permissions.
You can use either static credentials or the name of an AWS profile. If both are specified, the static credentials take precedence. For recommendations on how to set these variables in your environment, see [Authenticating Terraform to Access AWS](./user/TerraformAWSAuthentication.md).
@@ -63,7 +66,7 @@ You can use either static credentials or the name of an AWS profile. If both are
## Admin Access
-By default, the pubic endpoints of the AWS resources that are being created are only accessible through authenticated AWS clients (for example, the AWS Portal, the AWS CLI, etc.).
+By default, the public endpoints of the AWS resources that are being created are only accessible through authenticated AWS clients (for example, the AWS Portal, the AWS CLI, etc.).
To enable access for other administrative client applications (for example `kubectl`, `psql`, etc.), you can set Security Group rules to control access from your source IP addresses.
To set these permissions as part of this Terraform script, specify ranges of IP addresses in [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). Contact your Network Administrator to find the public CIDR range of your network.
@@ -72,31 +75,76 @@ NOTE: When deploying infrastructure into a private network (e.g. a VPN), with no
NOTE: The script will either create a new Security Group, or use an existing Security Group, if specified in the `security_group_id` variable.
+### Public Access CIDRs
+
You can use `default_public_access_cidrs` to set a default range for all created resources. To set different ranges for other resources, define the appropriate variable. Use an empty list [] to disallow access explicitly.
|
Name
| Description
| Type
| Default
| Notes
|
| :--- | :--- | :--- | :--- | :--- |
| default_public_access_cidrs | IP address ranges that are allowed to access all created cloud resources | list of strings | | Set a default for all resources. |
-| cluster_endpoint_public_access_cidrs | IP address ranges that are allowed to access the AKS cluster API | list of strings | | For client admin access to the cluster api (by kubectl, for example). Only used with `cluster_api_mode=public` |
+| cluster_endpoint_public_access_cidrs | IP address ranges that are allowed to access the EKS cluster API | list of strings | | For client admin access to the cluster api (by kubectl, for example). Only used with `cluster_api_mode=public` |
| vm_public_access_cidrs | IP address ranges that are allowed to access the VMs | list of strings | | Opens port 22 for SSH access to the jump server and/or NFS VM by adding Ingress Rule on the Security Group. Only used with `create_jump_public_ip=true` or `create_nfs_public_ip=true`. |
-| postgres_access_cidrs | IP address ranges that are allowed to access the AWS PostgreSQL server | list of strings || Opens port 5432 by adding Ingress Rule on the Security Group. Only used when creating postgres instances.|
+| postgres_public_access_cidrs | IP address ranges that are allowed to access the AWS PostgreSQL server | list of strings || Opens port 5432 by adding Ingress Rule on the Security Group. Only used when creating postgres instances.|
+
+### Private Access CIDRs
+
+For resources accessible at private IP addresses only, it may be necessary, depending upon your networking configuration, to specify additional CIDRs for clients requiring access to those resources. There are three private access CIDR variables provided so that you may specify distinct IP ranges needing access for each of the three different contexts:
+
+1. Cluster API Server Endpoint is Private - use `cluster_endpoint_private_access_cidrs` to indicate the client IP ranges needing access
+2. Jump or NFS Server VMs have only private IPs - use `vm_private_access_cidrs` to indicate the IP ranges for the IAC client VM needing access. IAC's baseline module will require SSH access to the Jump VM and/or NFS Server VM.
+3. VPC has no public egress - use `vpc_endpoint_private_access_cidrs` to allow access to AWS private link services required to build the cluster, e.g. EC2.
+
+For example, with a cluster API server endpoint that is private, the IAC client VM must have API server endpoint access during cluster creation to perform a health check. If your IAC client VM is not in your private subnet, its IP or CIDR range should be present in `cluster_endpoint_private_access_cidrs`.
+
+You can also use `default_private_access_cidrs` to apply the same CIDR range to all three private contexts. To set different CIDR ranges for a specific private context, set the appropriate variable. Use an empty list [] to disallow access explicitly.
+
+| Name
| Description
| Type
| Default
| Notes
|
+| :--- | :--- | :--- | :--- | :--- |
+| default_private_access_cidrs | IP address ranges that are allowed to access all created private cloud resources | list of strings | | Set a list of CIDR ranges that will be applied as a default value for `cluster_endpoint_private_access_cidrs`, `vpc_endpoint_private_access_cidrs` and `vm_private_access_cidrs`. **Note:** If you need to set distinct IP CIDR ranges for any of these contexts, use the specific variables below rather than this one. |
+| cluster_endpoint_private_access_cidrs | IP address ranges that are allowed to access the EKS cluster API Server endpoint| list of strings | | For clients needing access to the cluster api server endpoint (e.g. for VMs running terraform apply and for VMs where admins will use kubectl). Only used with `cluster_api_mode=private` |
+| vpc_endpoint_private_access_cidrs | IP address ranges that are allowed to access all AWS Services targeted by the VPC endpoints | list of strings | | Adds an ingress rule to the auxiliary security group (_prefix_-sg) protecting the VPC Endpoints, allowing HTTPS access at port 443. Only used with `vpc_private_endpoints_enabled=true`. |
+| vm_private_access_cidrs | IP address ranges that are allowed to access private IP based Jump or NFS Server VMs.| list of strings | | Opens port 22 for SSH access to the jump server and/or NFS VM by adding Ingress Rule on the Workers Security Group. Only used with `create_jump_public_ip=false` or `create_nfs_public_ip=false`. |
## Networking
- | Name | Description | Type | Default | Notes |
- | :--- | ---: | ---: | ---: | ---: |
- | vpc_cidr | Address space for the VPC | string | "192.168.0.0/16" | This variable is ignored when `vpc_id` is set (AKA bring your own VPC). |
- | subnets | Subnets to be created and their settings | map | See below for default values | This variable is ignored when `subnet_ids` is set (AKA bring your own subnets). All defined subnets must exist within the VPC address space. |
+| Name | Description | Type | Default | Notes |
+| :--- | ---: | ---: | ---: | ---: |
+| vpc_cidr | Address space for the VPC | string | "192.168.0.0/16" | This variable is ignored when `vpc_id` is set (AKA bring your own VPC). |
+| subnets | Subnets to be created and their settings | map | See below for default values | This variable is ignored when `subnet_ids` is set (AKA bring your own subnets). All defined subnets must exist within the VPC address space. |
+| subnet_azs | Configure specific AZs you want the subnets to be created in. The values must be distinct | optional map | {} see below for an example | If you wish for the codebase to lookup a list of AZs for you: either don't define subnet_azs to lookup AZs for all subnets, or omit the key for a specific subnet in the map to perform a lookup for it. This variable is ignored when `subnet_ids` is set (AKA bring your own subnets).|
+
+### Subnet requirements
+
+If no values are set for the [`subnet_ids`](#use-existing) configuration variable, `viya4-iac-aws` will create subnet resources for you. By default, a `private` subnet, two `control_plane` subnets, two `public` subnets and two `database` subnets will be created. The CIDR ranges selected for the default subnets are intended to provide sufficient IP address space for the cluster, any nodes and other Kubernetes resources that you expect to create in each subnet. The `private` subnet should provide sufficient IP address space for the maximum expected number of worker nodes and every pod launched within the EKS cluster. For the `control_plane` subnets, [AWS requires](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) at least two CIDR ranges, each in a different AZ, and recommends that a minimum of 16 IP addresses should be available in each subnet. If an external [postgres server](#postgresql-server) is configured, either two `public` or two `database` subnets, each in a different AZ are required. If public access to an external [postgres server](#postgresql-server) is configured by setting either `postgres_public_access_cidrs` or `default_public_access_cidrs`, the postgres server will use the public subnets, otherwise it will use the database subnets.
+
+For [BYO networking scenarios](../docs/user/BYOnetwork.md#supported-scenarios-and-requirements-for-using-existing-network-resources), existing subnet values are conveyed to `viya4-iac-aws` using the [`subnet_ids`](#use-existing) configuration variable. When configuring an EKS cluster without public access, the minimum subnet requirements include a `private` subnet with a recommended CIDR range of /18, and two `control_plane` subnets with recommended CIDR ranges of /28, each in a separate AZ. If an external postgres server is configured, two `database` subnets with recommended CIDR ranges of /25, each in a separate AZ also need to be provided. Amazon RDS relies on having two subnets in separate AZs to provide database redundancy should a failure occur in one AZ. If public access is required for your postgres server or cluster, supply two `public` subnets in different AZs instead of the `database` subnets.
The default values for the subnets variable are as follows:
```yaml
{
- "private" : ["192.168.0.0/18", "192.168.64.0/18"],
- "public" : ["192.168.129.0/25", "192.168.129.128/25"],
- "database" : ["192.168.128.0/25", "192.168.128.128/25"]
+ "private" : ["192.168.0.0/18"],
+ "control_plane" : ["192.168.130.0/28", "192.168.130.16/28"], # AWS recommends at least 16 IP addresses per subnet
+ "public" : ["192.168.129.0/25", "192.168.129.128/25"],
+ "database" : ["192.168.128.0/25", "192.168.128.128/25"]
}
```
+Example for `subnet_azs`:
+
+The zones defined below allow you to configure where each subnet in the map above will be created.
+e.g. Looking at the example `subnets` map above, for `"control_plane" : ["192.168.130.0/28", "192.168.130.16/28"]`, the first subnet will be created in `us-east-2c` and the second in `us-east-2b`
+
+```terraform
+subnet_azs = {
+ "private" : ["us-east-2c"],
+ "control_plane" : ["us-east-2c", "us-east-2b"],
+ "public" : ["us-east-2a", "us-east-2b"],
+ "database" : ["us-east-2a", "us-east-2b"]
+}
+```
+
+**Note:** supplying two or more subnets into the `private` list will deploy the node pools in a multi-az configuration, which the [SAS Platform Operations documentation](https://documentation.sas.com/?cdcId=itopscdc&cdcVersion=default&docsetId=itopssr&docsetTarget=n098rczq46ffjfn1xbgfzahytnmx.htm#p0vx68bmb3fs88n12d73wwxpsnhu) does not recommend
+
### Use Existing
If desired, you can deploy into an existing VPC, subnet and NAT gateway, and Security Group.
@@ -108,22 +156,31 @@ The variables in the table below can be used to define the existing resources. R
| Name | Description | Type | Default | Notes |
| :--- | ---: | ---: | ---: | ---: |
| vpc_id | ID of existing VPC | string | null | Only required if deploying into existing VPC. |
- | subnet_ids | List of existing subnets mapped to desired usage | map(string) | {} | Only required if deploying into existing subnets. |
- | nat_id | ID of existing AWS NAT gateway | string | null | Only required if deploying into existing VPC and subnets. |
+ | subnet_ids | List of existing subnets mapped to desired usage | map(string) | {} | Only required if deploying into existing subnets. See [Subnet requirements](#subnet-requirements) above.|
+ | nat_id | ID of existing AWS NAT gateway | string | null | Optional if deploying into existing VPC and subnets for [BYON scenarios 2 & 3](./user/BYOnetwork.md#supported-scenarios-and-requirements-for-using-existing-network-resources)|
| security_group_id | ID of existing Security Group that controls external access to Jump/NFS VMs and Postgres | string | null | Only required if using existing Security Group. See [Security Group](./user/BYOnetwork.md#external-access-security-group) for requirements. |
| cluster_security_group_id | ID of existing Security Group that controls Pod access to the control plane | string | null | Only required if using existing Cluster Security Group. See [Cluster Security Group](./user/BYOnetwork.md#cluster-security-group) for requirements.|
-| workers_security_group_id | ID of existing Security Group that allows access between node VMs, Jump VM, and data sourcess (nfs, efs, postges) | string | null | Only required if using existing Security Group for Node Group VMs. See [Workers Security Group](./user/BYOnetwork.md#workers-security-group) for requirements. |
+| workers_security_group_id | ID of existing Security Group that allows access between node VMs, Jump VM, and data sources (nfs, efs, postgres) | string | null | Only required if using existing Security Group for Node Group VMs. See [Workers Security Group](./user/BYOnetwork.md#workers-security-group) for requirements. |
Example `subnet_ids` variable:
-```yaml
+```terraform
subnet_ids = {
"public" : ["existing-public-subnet-id1", "existing-public-subnet-id2"],
- "private" : ["existing-private-subnet-id1", "existing-private-subnet-id2"],
+ "private" : ["existing-private-subnet-id1"],
+ "control_plane" : ["existing-control-plane-subnet-id1", "existing-control-plane-subnet-id2"],
"database" : ["existing-database-subnet-id1","existing-database-subnet-id2"]
}
```
+**Note:** supplying two or more subnets into the `private` list will deploy the node pools in a multi-az configuration, which the [SAS Platform Operations documentation](https://documentation.sas.com/?cdcId=itopscdc&cdcVersion=default&docsetId=itopssr&docsetTarget=n098rczq46ffjfn1xbgfzahytnmx.htm#p0vx68bmb3fs88n12d73wwxpsnhu) does not recommend
+
+### VPC Endpoints
+| Name | Description | Type | Default | Notes |
+ | :--- | ---: | ---: | ---: | ---: |
+ | vpc_private_endpoints_enabled | Enable the creation of VPC private endpoints | bool | true | Setting to false prevents IaC from creating and managing VPC private endpoints in the cluster |
+
+
## IAM
By default, two custom IAM policies and two custom IAM roles (with instance profiles) are created. If your site security protocol does not allow for automatic creation of IAM resources, you can provide pre-created roles using the following options:
@@ -336,7 +393,7 @@ Each server element, like `foo = {}`, can contain none, some, or all of the para
| backup_retention_days | Backup retention days for the PostgreSQL server | number | 7 | Supported values are between 7 and 35 days. |
| storage_encrypted | Encrypt PostgreSQL data at rest | bool | false| |
| administrator_login | The Administrator Login for the PostgreSQL Server | string | "pgadmin" | The admin login name can not be 'admin', must start with a letter, and must be between 1-16 characters in length, and can only contain underscores, letters, and numbers. Changing this forces a new resource to be created |
-| administrator_password | The Password associated with the administrator_login for the PostgreSQL Server | string | "my$up3rS3cretPassw0rd" | The admin passsword must have more than 8 characters, and be composed of any printable characters except the following / ' \" @ characters. |
+| administrator_password | The Password associated with the administrator_login for the PostgreSQL Server | string | "my$up3rS3cretPassw0rd" | The admin password must have more than 8 characters, and be composed of any printable characters except the following / ' \" @ characters. |
| multi_az | Specifies if PostgreSQL instance is multi-AZ | bool | false | |
| deletion_protection | Protect from accidental resource deletion | bool | false | |
| ssl_enforcement_enabled | Enforce SSL on connections to PostgreSQL server instance | bool | true | |
diff --git a/docs/user/BYOnetwork.md b/docs/user/BYOnetwork.md
index 46eae4c5..7a2fbe3d 100644
--- a/docs/user/BYOnetwork.md
+++ b/docs/user/BYOnetwork.md
@@ -4,12 +4,14 @@ You have the option to use existing network resources with SAS Viya 4 Terraform
**NOTE:** We refer to the use of existing resources as "bring your own" or "BYO" resources.
-| Scenario|Required Variables|Additional Requirements|Resources to be Created|
-| :--- | :--- | :--- | :--- |
-| 1. To work with an existing VPC | `vpc_id` | - VPC does not contain any Subnets or other [Network components](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Networking.html)
- VPC block size must be IPv4 with '/16' netmask (supports 65,536 IP addresses)
- `DNS hostnames` and `DNS resolution` are enabled
- [`subnets`](../CONFIG-VARS.md#networking) CIDR blocks must match with VPC IPv4 CIDR block
| Subnets, NAT Gateway and Security Group|
-| 2. To configure all components of your VPC network - Subnets, Routes & associations, Internet and NAT Gateways | `vpc_id`,
`subnet_ids` and
`nat_id` | - all requirements from Scenario #1
- Subnets Availability Zones must be within the [location](../CONFIG-VARS.md#required-variables)
- AWS Tags with `` value replaced with the [prefix](../CONFIG-VARS.md#required-variables) input value for
- Public Subnets:- `{"kubernetes.io/role/elb"="1"}`
- `{"kubernetes.io/cluster/-eks"="shared"}`
-Private Subnets:- `{"kubernetes.io/role/internal-elb"="1"}`
- `{"kubernetes.io/cluster/-eks"="shared"}`
See [AWS docs](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) for background on subnet tag requirements to match EKS Cluster name| Security Group |
-| 3. To configure all components of your VPC network and Security Groups | `vpc_id`,
`subnet_ids`,
`nat_id`,
`security_group_id`,
`cluster_security_group_id`, and
`workers_security_group_id` |- all requirements from Scenarios #2 and [these pre-defined Security Groups](#security-groups)
| None |
+|Scenario |Description|Required Variables|Optional Variables|Additional Requirements|Resources to be Created|
+| -: | :--- | :--- | :--- | :--- | :---|
+| 0|No existing network resources | None | | Not a BYO network scenario | IaC creates the required network resources |
+| 1|To work with an existing VPC | `vpc_id` | | - VPC does not contain any Subnets or other [Network components](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Networking.html)
- VPC block size must be IPv4 with '/16' netmask (supports 65,536 IP addresses)
- `DNS hostnames` and `DNS resolution` are enabled
- [`subnets`](../CONFIG-VARS.md#networking) CIDR blocks must match with VPC IPv4 CIDR block
| Subnets, NAT Gateway and Security Groups|
+| 2|To configure all components of your VPC network - Subnets, Routes & associations and optionally Internet and NAT Gateways | `vpc_id`,
one `private` subnet and two `control_plane` subnets within the [subnet_ids](../CONFIG-VARS.md#use-existing) map,
see [Subnet requirements](../CONFIG-VARS.md#subnet-requirements) | `nat_id`,
`public` and `database` subnet lists within the [subnet_ids](../CONFIG-VARS.md#use-existing) map | - all requirements from Scenario #1
- Subnets Availability Zones must be within the [location](../CONFIG-VARS.md#required-variables)
- AWS Tags with `` value replaced with the [prefix](../CONFIG-VARS.md#required-variables) input value for
- Public Subnets:- `{"kubernetes.io/role/elb"="1"}`
- `{"kubernetes.io/cluster/-eks"="shared"}`
-Private Subnets:- `{"kubernetes.io/role/internal-elb"="1"}`
- `{"kubernetes.io/cluster/-eks"="shared"}`
See [AWS docs](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html) for background on subnet tag requirements to match EKS Cluster name| Security Groups |
+| 3|To configure all components of your VPC network and Security Groups and optionally Internet and NAT Gateways| `vpc_id`,
one `private` subnet and two `control_plane` subnets within the [subnet_ids](../CONFIG-VARS.md#use-existing) map,
see [Subnet requirements](../CONFIG-VARS.md#subnet-requirements),
`security_group_id`,
`cluster_security_group_id`, and
`workers_security_group_id` | `nat_id`,
`public` and `database` subnet lists within the [subnet_ids](../CONFIG-VARS.md#use-existing) map |- all requirements from Scenarios #2 and [these pre-defined Security Groups](#security-groups)
| None |
+**Note**: The `byo_network_scenario` IAC output value is informational only and is intended to convey the BYO network scenario that IAC has selected according to the [Use Existing](../CONFIG-VARS.md#use-existing) input variable values provided to IAC.
### Security Groups
@@ -50,9 +52,9 @@ For more information on these Security Groups, please see https://docs.aws.amazo
When creating your BYO Network resources you should consult with your Network Administrator and use any of these methods to create a working AWS VPC Network:
- [AWS QuickStarts for VPC](https://aws.amazon.com/quickstart/architecture/vpc/)
-- See the "simple-vpc" and "complete-vpc" examples in [terraform-aws-vpc module](https://github.com/terraform-aws-modules/terraform-aws-vpc/tree/master/examples)
+- See the "simple-vpc" and "complete-vpc" examples in [terraform-aws-vpc module](https://github.com/terraform-aws-modules/terraform-aws-vpc/tree/master/examples)
-AWS documentation for reference:
+AWS documentation for reference:
- [How Amazon VPC works](https://docs.aws.amazon.com/vpc/latest/userguide/how-it-works.html)
- [VPC and subnet sizing for IPv4](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html#vpc-sizing-ipv4)
diff --git a/docs/user/TerraformAWSAuthentication.md b/docs/user/TerraformAWSAuthentication.md
index f7a8adc8..69ff7905 100644
--- a/docs/user/TerraformAWSAuthentication.md
+++ b/docs/user/TerraformAWSAuthentication.md
@@ -1,6 +1,6 @@
# Authenticating Terraform to Access AWS
-In order to create and destroy AWS resources on your behalf, Terraform needs a AWS account that has sufficient permissions to perform all the actions defined in the Terraform manifest. You will need an AWS account IAM user that has at a mininum the permissions listed in [this policy](../../files/policies/devops-iac-eks-policy.json).
+In order to create and destroy AWS resources on your behalf, Terraform needs a AWS account that has sufficient permissions to perform all the actions defined in the Terraform manifest. You will need an AWS account IAM user that has at a minimum the permissions listed in [this policy](../../files/policies/devops-iac-eks-policy.json).
You can either use static credentials (including temporary credentials with session token) or a [profile with a credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html).
@@ -28,7 +28,7 @@ TF_VAR_aws_secret_access_key=
TF_VAR_aws_session_token=
```
-> **NOTE** `AWS_SESSION_TOKEN` is optional and is only required when using you are using temporary AWS credentials. See the [AWS documention](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html) on environment variables for more information.
+> **NOTE** `AWS_SESSION_TOKEN` is optional and is only required when you are using temporary AWS credentials. See the [AWS documentation](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html) on environment variables for more information.
## Using AWS Profile with Credentials File
diff --git a/docs/user/TerraformUsage.md b/docs/user/TerraformUsage.md
index 4c584f76..427c5bb4 100644
--- a/docs/user/TerraformUsage.md
+++ b/docs/user/TerraformUsage.md
@@ -54,7 +54,7 @@ terraform apply
This command can take a few minutes to complete. Once it has completed, Terraform output values are written to the console. The `kubeconfig` file for the cluster is written to `[prefix]-eks-kubeconfig.conf` in the current directory, `$(pwd)`.
-### Display Terrafrom Output
+### Display Terraform Output
Once the cloud resources have been created using the `terraform apply` command, Terraform output values can be displayed again later at any time by running the following command:
@@ -68,7 +68,7 @@ After provisioning the infrastructure, you can make additional changes by modify
### Tear Down Cloud Resources
-To destroy all the cloud resources created with the previous comamnds, run the following command:
+To destroy all the cloud resources created with the previous commands, run the following command:
```bash
terraform destroy
diff --git a/files/custom-data/additional_userdata.sh b/files/custom-data/additional_userdata.sh
index abd27236..85c3978f 100644
--- a/files/custom-data/additional_userdata.sh
+++ b/files/custom-data/additional_userdata.sh
@@ -22,12 +22,12 @@ FILESYSTEM_BLOCK_SIZE=${FILESYSTEM_BLOCK_SIZE:-4096} # Bytes
STRIDE=$(expr $RAID_CHUNK_SIZE \* 1024 / $FILESYSTEM_BLOCK_SIZE || true)
STRIPE_WIDTH=$(expr $SSD_NVME_DEVICE_COUNT \* $STRIDE || true)
-# Checking if provisioning already happend
+# Checking if provisioning already happened
if [[ "$(ls -A /pv-disks)" ]]
then
echo 'Volumes already present in "/pv-disks"'
echo -e "\n$(ls -Al /pv-disks | tail -n +2)\n"
- echo "I assume that provisioning already happend, doing nothing!"
+ echo "I assume that provisioning already happened, doing nothing!"
exit 0
fi
diff --git a/linting-configs/.tflint.hcl b/linting-configs/.tflint.hcl
index 8a125a87..7b6e4873 100644
--- a/linting-configs/.tflint.hcl
+++ b/linting-configs/.tflint.hcl
@@ -14,7 +14,7 @@ config {
plugin "aws" {
enabled = true
- version = "0.23.0"
+ version = "0.27.0"
source = "github.com/terraform-linters/tflint-ruleset-aws"
}
diff --git a/locals.tf b/locals.tf
index d34a82d2..eb96deb2 100755
--- a/locals.tf
+++ b/locals.tf
@@ -7,7 +7,8 @@ locals {
aws_caller_identity_user_name = element(split("/", data.aws_caller_identity.terraform.arn), length(split("/", data.aws_caller_identity.terraform.arn)) - 1)
# General
- security_group_id = var.security_group_id == null ? aws_security_group.sg[0].id : data.aws_security_group.sg[0].id
+ sec_group = (length(aws_security_group.sg_a) == 0 && length(aws_security_group.sg_b) == 0) ? null : coalescelist(aws_security_group.sg_a, aws_security_group.sg_b)
+ security_group_id = var.security_group_id == null ? local.sec_group[0].id : data.aws_security_group.sg[0].id
cluster_security_group_id = var.cluster_security_group_id == null ? aws_security_group.cluster_security_group[0].id : var.cluster_security_group_id
workers_security_group_id = var.workers_security_group_id == null ? aws_security_group.workers_security_group[0].id : var.workers_security_group_id
cluster_name = "${var.prefix}-eks"
@@ -20,17 +21,32 @@ locals {
aws_shared_credentials = local.use_aws_shared_credentials_file ? [var.aws_shared_credentials_file] : var.aws_shared_credentials_files
# CIDRs
- default_public_access_cidrs = var.default_public_access_cidrs == null ? [] : var.default_public_access_cidrs
- vm_public_access_cidrs = var.vm_public_access_cidrs == null ? local.default_public_access_cidrs : var.vm_public_access_cidrs
- cluster_endpoint_public_access_cidrs = var.cluster_api_mode == "private" ? [] : (var.cluster_endpoint_public_access_cidrs == null ? local.default_public_access_cidrs : var.cluster_endpoint_public_access_cidrs)
- cluster_endpoint_private_access_cidrs = var.cluster_endpoint_private_access_cidrs == null ? distinct(concat(module.vpc.public_subnet_cidrs, module.vpc.private_subnet_cidrs)) : var.cluster_endpoint_private_access_cidrs # tflint-ignore: terraform_unused_declarations
- postgres_public_access_cidrs = var.postgres_public_access_cidrs == null ? local.default_public_access_cidrs : var.postgres_public_access_cidrs
+ default_public_access_cidrs = var.default_public_access_cidrs == null ? [] : var.default_public_access_cidrs
+ default_private_access_cidrs = var.default_private_access_cidrs == null ? [] : var.default_private_access_cidrs
+
+ vm_public_access_cidrs = var.vm_public_access_cidrs == null ? local.default_public_access_cidrs : var.vm_public_access_cidrs
+ vm_private_access_cidrs = var.vm_private_access_cidrs == null ? local.default_private_access_cidrs : var.vm_private_access_cidrs
+
+ cluster_endpoint_public_access_cidrs = var.cluster_api_mode == "private" ? [] : (var.cluster_endpoint_public_access_cidrs == null ? local.default_public_access_cidrs : var.cluster_endpoint_public_access_cidrs)
+
+ cluster_endpoint_private_access_cidrs = var.cluster_endpoint_private_access_cidrs == null ? distinct(concat(module.vpc.public_subnet_cidrs, module.vpc.private_subnet_cidrs, local.default_private_access_cidrs)) : distinct(concat(module.vpc.public_subnet_cidrs, module.vpc.private_subnet_cidrs, local.default_private_access_cidrs, var.cluster_endpoint_private_access_cidrs)) # tflint-ignore: terraform_unused_declarations
+
+ vpc_endpoint_private_access_cidrs = var.vpc_endpoint_private_access_cidrs == null ? distinct(concat(module.vpc.public_subnet_cidrs, module.vpc.private_subnet_cidrs, local.default_private_access_cidrs)) : distinct(concat(module.vpc.public_subnet_cidrs, module.vpc.private_subnet_cidrs, local.default_private_access_cidrs, var.vpc_endpoint_private_access_cidrs))
+
+ postgres_public_access_cidrs = var.postgres_public_access_cidrs == null ? local.default_public_access_cidrs : var.postgres_public_access_cidrs
# Subnets
jump_vm_subnet = var.create_jump_public_ip ? module.vpc.public_subnets[0] : module.vpc.private_subnets[0]
nfs_vm_subnet = var.create_nfs_public_ip ? module.vpc.public_subnets[0] : module.vpc.private_subnets[0]
nfs_vm_subnet_az = var.create_nfs_public_ip ? module.vpc.public_subnet_azs[0] : module.vpc.private_subnet_azs[0]
+ # Generate list of AZ where created subnets should be placed
+ # If not specified by the user replace with list of all AZs in a region
+ public_subnet_azs = can(var.subnet_azs["public"]) ? var.subnet_azs["public"] : data.aws_availability_zones.available.names
+ private_subnet_azs = can(var.subnet_azs["private"]) ? var.subnet_azs["private"] : data.aws_availability_zones.available.names
+ database_subnet_azs = can(var.subnet_azs["database"]) ? var.subnet_azs["database"] : data.aws_availability_zones.available.names
+ control_plane_subnet_azs = can(var.subnet_azs["control_plane"]) ? var.subnet_azs["control_plane"] : data.aws_availability_zones.available.names
+
ssh_public_key = (var.create_jump_vm || var.storage_type == "standard"
? file(var.ssh_public_key)
: null
@@ -45,7 +61,7 @@ locals {
# Kubernetes
kubeconfig_filename = "${local.cluster_name}-kubeconfig.conf"
kubeconfig_path = var.iac_tooling == "docker" ? "/workspace/${local.kubeconfig_filename}" : local.kubeconfig_filename
- kubeconfig_ca_cert = data.aws_eks_cluster.cluster.certificate_authority[0].data
+ kubeconfig_ca_cert = module.eks.cluster_certificate_authority_data
# Mapping node_pools to node_groups
default_node_pool = {
@@ -87,6 +103,9 @@ locals {
launch_template_use_name_prefix = true
launch_template_tags = { Name = "${local.cluster_name}-default" }
tags = var.autoscaling_enabled ? merge(local.tags, { key = "k8s.io/cluster-autoscaler/${local.cluster_name}", value = "owned", propagate_at_launch = true }, { key = "k8s.io/cluster-autoscaler/enabled", value = "true", propagate_at_launch = true }) : local.tags
+ # Node Pool IAM Configuration
+ iam_role_use_name_prefix = false
+ iam_role_name = "${var.prefix}-default-eks-node-group"
}
}
@@ -133,6 +152,9 @@ locals {
launch_template_use_name_prefix = true
launch_template_tags = { Name = "${local.cluster_name}-${key}" }
tags = var.autoscaling_enabled ? merge(local.tags, { key = "k8s.io/cluster-autoscaler/${local.cluster_name}", value = "owned", propagate_at_launch = true }, { key = "k8s.io/cluster-autoscaler/enabled", value = "true", propagate_at_launch = true }) : local.tags
+ # Node Pool IAM Configuration
+ iam_role_use_name_prefix = false
+ iam_role_name = "${var.prefix}-${key}-eks-node-group"
}
}
@@ -147,10 +169,10 @@ locals {
postgres_outputs = length(module.postgresql) != 0 ? { for k, v in module.postgresql :
k => {
- "server_name" : module.postgresql[k].db_instance_id,
+ "server_name" : module.postgresql[k].db_instance_identifier,
"fqdn" : module.postgresql[k].db_instance_address,
"admin" : module.postgresql[k].db_instance_username,
- "password" : module.postgresql[k].db_instance_password,
+ "password" : local.postgres_servers[k].administrator_password,
"server_port" : module.postgresql[k].db_instance_port
"ssl_enforcement_enabled" : local.postgres_servers[k].ssl_enforcement_enabled,
"internal" : false
diff --git a/main.tf b/main.tf
index 7b9d7ca3..745961fa 100755
--- a/main.tf
+++ b/main.tf
@@ -17,12 +17,8 @@ provider "aws" {
}
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
+ name = module.eks.cluster_name
}
data "aws_availability_zones" "available" {}
@@ -44,14 +40,14 @@ resource "kubernetes_config_map" "sas_iac_buildinfo" {
}
data = {
- git-hash = lookup(data.external.git_hash.result, "git-hash")
+ git-hash = data.external.git_hash.result["git-hash"]
timestamp = chomp(timestamp())
iac-tooling = var.iac_tooling
terraform = < 0 ? true : false
- subnet_ids = length(local.postgres_public_access_cidrs) > 0 ? module.vpc.public_subnets : module.vpc.database_subnets
+ subnet_ids = length(local.postgres_public_access_cidrs) > 0 ? length(module.vpc.public_subnets) > 0 ? module.vpc.public_subnets : module.vpc.database_subnets : module.vpc.database_subnets
# DB parameter group
family = "postgres${each.value.server_version}"
@@ -263,11 +276,11 @@ module "postgresql" {
options = each.value.options
# Flags for module to flag if postgres should be created or not.
- create_db_instance = true
- create_db_subnet_group = true
- create_db_parameter_group = true
- create_db_option_group = true
- create_random_password = false
+ create_db_instance = true
+ create_db_subnet_group = true
+ create_db_parameter_group = true
+ create_db_option_group = true
+ manage_master_user_password = false
}
# Resource Groups - https://www.terraform.io/docs/providers/aws/r/resourcegroups_group.html
diff --git a/modules/aws_autoscaling/main.tf b/modules/aws_autoscaling/main.tf
index 8fb54455..f5d9cd45 100644
--- a/modules/aws_autoscaling/main.tf
+++ b/modules/aws_autoscaling/main.tf
@@ -6,7 +6,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = "~> 5.0"
+ version = "5.25.0"
}
}
}
@@ -69,7 +69,7 @@ resource "aws_iam_policy" "worker_autoscaling" {
module "iam_assumable_role_with_oidc" {
source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
- version = "4.24.1"
+ version = "5.30.2"
create_role = true
role_name = "${var.prefix}-cluster-autoscaler"
diff --git a/modules/aws_ebs_csi/main.tf b/modules/aws_ebs_csi/main.tf
index 181dd198..37cf1666 100644
--- a/modules/aws_ebs_csi/main.tf
+++ b/modules/aws_ebs_csi/main.tf
@@ -6,7 +6,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = "~> 5.0"
+ version = "5.25.0"
}
}
}
@@ -167,7 +167,7 @@ EOT
module "iam_assumable_role_with_oidc" {
source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
- version = "4.24.1"
+ version = "5.30.2"
create_role = true
role_name = "${var.prefix}-ebs-csi-role"
diff --git a/modules/aws_vm/main.tf b/modules/aws_vm/main.tf
index 439a2b94..457bb621 100644
--- a/modules/aws_vm/main.tf
+++ b/modules/aws_vm/main.tf
@@ -7,7 +7,7 @@
locals {
device_name = [
# "/dev/sdb", - NOTE: These are skipped, Ubuntu Server 20.04 LTS
- # "/dev/sdc", uses these for ephmeral storage.
+ # "/dev/sdc", uses these for ephemeral storage.
"/dev/sdd",
"/dev/sde",
"/dev/sdf",
diff --git a/modules/aws_vm/variables.tf b/modules/aws_vm/variables.tf
index 88a7c030..8121de5d 100644
--- a/modules/aws_vm/variables.tf
+++ b/modules/aws_vm/variables.tf
@@ -2,7 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
variable "name" {
- type = string
+ description = "Name to assign the VM"
+ type = string
}
variable "tags" {
@@ -12,77 +13,102 @@ variable "tags" {
}
variable "vm_type" {
- default = "m5.4xlarge"
+ description = "EC2 instance type"
+ type = string
+ default = "m5.4xlarge"
}
variable "cloud_init" {
- default = ""
-}
-
-variable "postgres_administrator_login" {
- description = "The Administrator Login for the PostgreSQL Server. Changing this forces a new resource to be created."
- default = "pgadmin"
+ description = "Cloud init script to execute"
+ type = string
+ default = ""
}
variable "vm_admin" {
- description = "OS Admin User for VMs of AKS Cluster nodes"
+ description = "OS Admin User for the EC2 instance"
+ type = string
default = "azureuser"
}
variable "ssh_public_key" {
description = "Path to ssh public key"
+ type = string
default = ""
}
variable "security_group_ids" {
- default = []
+ description = "List of security group ids to associate with the EC2 instance"
+ type = list(string)
+ default = []
}
variable "create_public_ip" {
- default = false
+ description = "Toggle the creation of a public EIP to be associated with the EC2 instance"
+ type = bool
+ default = false
}
variable "data_disk_count" {
- default = 0
+ description = "Number of disks to attach to the EC2 instance"
+ type = number
+ default = 0
}
variable "data_disk_size" {
- default = 128
+ description = "Size of disk to attach to the EC2 instance in GiBs"
+ type = number
+ default = 128
}
variable "data_disk_type" {
- default = "gp2"
+ description = "The type of EBS volume for the data disk"
+ type = string
+ default = "gp2"
}
variable "data_disk_availability_zone" {
- default = ""
+ description = "The AZ where the EBS volume will exist"
+ type = string
+ default = ""
}
variable "data_disk_iops" {
- default = 0
+ description = "The amount of IOPS to provision for the data disk"
+ type = number
+ default = 0
}
variable "os_disk_size" {
- default = 64
+ description = "The size of the OS disk"
+ type = number
+ default = 64
}
variable "os_disk_type" {
- default = "standard"
+ description = "The type of EBS volume for the OS disk"
+ type = string
+ default = "standard"
}
variable "os_disk_delete_on_termination" {
- default = true
+ description = "Delete disk on termination"
+ type = bool
+ default = true
}
variable "os_disk_iops" {
- default = 0
+ description = "The amount of IOPS to provision for the OS disk"
+ type = number
+ default = 0
}
variable "subnet_id" {
- type = string
+ description = "The VPC Subnet ID to launch in."
+ type = string
}
variable "enable_ebs_encryption" {
description = "Enable encryption on EBS volumes."
+ type = bool
default = false
}
diff --git a/modules/aws_vpc/main.tf b/modules/aws_vpc/main.tf
index a0463bb9..214b11d4 100644
--- a/modules/aws_vpc/main.tf
+++ b/modules/aws_vpc/main.tf
@@ -10,10 +10,20 @@ locals {
existing_public_subnets = local.existing_subnets && contains(keys(var.existing_subnet_ids), "public") ? (length(var.existing_subnet_ids["public"]) > 0 ? true : false) : false
existing_private_subnets = local.existing_subnets && contains(keys(var.existing_subnet_ids), "private") ? (length(var.existing_subnet_ids["private"]) > 0 ? true : false) : false
existing_database_subnets = local.existing_subnets && contains(keys(var.existing_subnet_ids), "database") ? (length(var.existing_subnet_ids["database"]) > 0 ? true : false) : false
+ existing_control_plane_subnets = local.existing_subnets && contains(keys(var.existing_subnet_ids), "control_plane") ? (length(var.existing_subnet_ids["control_plane"]) > 0 ? true : false) : false
- public_subnets = local.existing_public_subnets ? data.aws_subnet.public : aws_subnet.public
+ # public_subnets = local.existing_public_subnets ? data.aws_subnet.public : aws_subnet.public # not used keeping for ref
private_subnets = local.existing_private_subnets ? data.aws_subnet.private : aws_subnet.private
+ control_plane_subnets = local.existing_control_plane_subnets ? data.aws_subnet.control_plane : aws_subnet.control_plane
+ # Use private subnets if we are not creating db subnets and there are no existing db subnets
+ database_subnets = local.existing_database_subnets ? data.aws_subnet.database : element(concat(aws_subnet.database[*].id, tolist([""])), 0) != "" ? aws_subnet.database : local.private_subnets
+
+ byon_tier = var.vpc_id == null ? 0 : local.existing_private_subnets ? (var.raw_sec_group_id == null && var.cluster_security_group_id == null && var.workers_security_group_id == null) ? 2 : 3 : 1
+ byon_scenario = local.byon_tier
+
+ create_nat_gateway = (local.byon_scenario == 0 || local.byon_scenario == 1) ? true : false
+ create_subnets = (local.byon_scenario == 0 || local.byon_scenario == 1) ? true : false
}
data "aws_vpc" "vpc" {
@@ -38,48 +48,53 @@ resource "aws_vpc" "vpc" {
}
resource "aws_vpc_endpoint" "private_endpoints" {
- count = length(var.vpc_private_endpoints)
- vpc_id = local.vpc_id
- service_name = "com.amazonaws.${var.region}.${var.vpc_private_endpoints[count.index]}"
- vpc_endpoint_type = "Interface"
- security_group_ids = [var.security_group_id]
+ for_each = var.vpc_private_endpoints_enabled ? var.vpc_private_endpoints : {}
+ vpc_id = local.vpc_id
+ service_name = "com.amazonaws.${var.region}.${each.key}"
+ vpc_endpoint_type = each.value
+ security_group_ids = each.value == "Interface" ? [var.security_group_id] : null
+ private_dns_enabled = each.value == "Interface" ? true : null
tags = merge(
{
- "Name" = format("%s", "${var.name}-private-endpoint-${var.vpc_private_endpoints[count.index]}")
+ "Name" = format("%s", "${var.name}-private-endpoint-${each.key}")
},
var.tags,
)
- subnet_ids = [
+ subnet_ids = each.value == "Interface" ? [
for subnet in local.private_subnets : subnet.id
- ]
+ ] : null
}
data "aws_subnet" "public" {
- count = local.existing_public_subnets ? length(var.subnets["public"]) : 0
+ count = local.existing_public_subnets ? length(var.existing_subnet_ids["public"]) : 0
id = element(var.existing_subnet_ids["public"], count.index)
}
data "aws_subnet" "private" {
- count = local.existing_private_subnets ? length(var.subnets["private"]) : 0
+ count = local.existing_private_subnets ? length(var.existing_subnet_ids["private"]) : 0
id = element(var.existing_subnet_ids["private"], count.index)
}
data "aws_subnet" "database" {
- count = local.existing_database_subnets ? length(var.subnets["database"]) : 0
+ count = local.existing_database_subnets ? length(var.existing_subnet_ids["database"]) : 0
id = element(var.existing_subnet_ids["database"], count.index)
}
+data "aws_subnet" "control_plane" {
+ count = local.existing_control_plane_subnets ? length(var.existing_subnet_ids["control_plane"]) : 0
+ id = element(var.existing_subnet_ids["control_plane"], count.index)
+}
################
# Public subnet
################
resource "aws_subnet" "public" {
- count = local.existing_public_subnets ? 0 : length(var.subnets["public"])
+ count = local.existing_public_subnets ? 0 : local.create_subnets ? length(var.subnets["public"]) : 0
vpc_id = local.vpc_id
cidr_block = element(var.subnets["public"], count.index)
- availability_zone = length(regexall("^[a-z]{2}-", element(var.azs, count.index))) > 0 ? element(var.azs, count.index) : null
- availability_zone_id = length(regexall("^[a-z]{2}-", element(var.azs, count.index))) == 0 ? element(var.azs, count.index) : null
+ availability_zone = length(regexall("^[a-z]{2}-", element(var.public_subnet_azs, count.index))) > 0 ? element(var.public_subnet_azs, count.index) : null
+ availability_zone_id = length(regexall("^[a-z]{2}-", element(var.public_subnet_azs, count.index))) == 0 ? element(var.public_subnet_azs, count.index) : null
map_public_ip_on_launch = var.map_public_ip_on_launch
tags = merge(
@@ -87,7 +102,7 @@ resource "aws_subnet" "public" {
"Name" = format(
"%s-${var.public_subnet_suffix}-%s",
var.name,
- element(var.azs, count.index),
+ element(var.public_subnet_azs, count.index),
)
},
var.tags,
@@ -99,7 +114,7 @@ resource "aws_subnet" "public" {
# Internet Gateway
###################
resource "aws_internet_gateway" "this" {
- count = var.existing_nat_id == null ? 1 : 0
+ count = var.existing_nat_id == null ? local.create_nat_gateway ? 1 : 0 : 0
vpc_id = local.vpc_id
@@ -115,7 +130,7 @@ resource "aws_internet_gateway" "this" {
# Publiс routes
################
resource "aws_route_table" "public" {
- count = local.existing_public_subnets ? 0 : 1
+ count = local.existing_public_subnets ? 0 : local.create_subnets ? 1 : 0
vpc_id = local.vpc_id
tags = merge(
@@ -123,7 +138,7 @@ resource "aws_route_table" "public" {
"Name" = format(
"%s-${var.public_subnet_suffix}-%s",
var.name,
- element(var.azs, count.index),
+ element(var.public_subnet_azs, count.index),
)
},
var.tags,
@@ -131,7 +146,7 @@ resource "aws_route_table" "public" {
}
resource "aws_route" "public_internet_gateway" {
- count = var.existing_nat_id == null ? 1 : 0
+ count = var.existing_nat_id == null ? local.create_nat_gateway ? 1 : 0 : 0
route_table_id = aws_route_table.public[0].id
destination_cidr_block = "0.0.0.0/0"
@@ -153,19 +168,25 @@ resource "aws_route_table_association" "private" {
}
resource "aws_route_table_association" "public" {
- count = local.existing_public_subnets ? 0 : length(var.subnets["public"])
+ count = local.existing_public_subnets ? 0 : local.create_subnets ? length(var.subnets["public"]) : 0
subnet_id = element(aws_subnet.public[*].id, count.index)
route_table_id = element(aws_route_table.public[*].id, 0)
}
resource "aws_route_table_association" "database" {
- count = local.existing_database_subnets ? 0 : length(var.subnets["database"])
+ count = local.existing_database_subnets ? 0 : local.create_subnets ? length(var.subnets["database"]) : 0
subnet_id = element(aws_subnet.database[*].id, count.index)
route_table_id = element(aws_route_table.private[*].id, 0)
}
+resource "aws_route_table_association" "control_plane" {
+ count = local.existing_control_plane_subnets ? 0 : local.create_subnets ? length(var.subnets["control_plane"]) : 0
+
+ subnet_id = element(aws_subnet.control_plane[*].id, count.index)
+ route_table_id = element(aws_route_table.private[*].id, 0)
+}
#################
# Private subnet
#################
@@ -173,15 +194,15 @@ resource "aws_subnet" "private" {
count = local.existing_private_subnets ? 0 : length(var.subnets["private"])
vpc_id = local.vpc_id
cidr_block = element(var.subnets["private"], count.index)
- availability_zone = length(regexall("^[a-z]{2}-", element(var.azs, count.index))) > 0 ? element(var.azs, count.index) : null
- availability_zone_id = length(regexall("^[a-z]{2}-", element(var.azs, count.index))) == 0 ? element(var.azs, count.index) : null
+ availability_zone = length(regexall("^[a-z]{2}-", element(var.private_subnet_azs, count.index))) > 0 ? element(var.private_subnet_azs, count.index) : null
+ availability_zone_id = length(regexall("^[a-z]{2}-", element(var.private_subnet_azs, count.index))) == 0 ? element(var.private_subnet_azs, count.index) : null
tags = merge(
{
"Name" = format(
"%s-${var.private_subnet_suffix}-%s",
var.name,
- element(var.azs, count.index),
+ element(var.private_subnet_azs, count.index),
)
},
var.tags,
@@ -203,7 +224,7 @@ resource "aws_route_table" "private" {
"Name" = format(
"%s-${var.private_subnet_suffix}-%s",
var.name,
- element(var.azs, count.index),
+ element(var.private_subnet_azs, count.index),
)
},
var.tags,
@@ -214,18 +235,18 @@ resource "aws_route_table" "private" {
# Database subnet
##################
resource "aws_subnet" "database" {
- count = local.existing_database_subnets ? 0 : length(var.subnets["database"])
+ count = local.existing_database_subnets ? 0 : local.create_subnets ? length(var.subnets["database"]) : 0
vpc_id = local.vpc_id
cidr_block = element(var.subnets["database"], count.index)
- availability_zone = length(regexall("^[a-z]{2}-", element(var.azs, count.index))) > 0 ? element(var.azs, count.index) : null
- availability_zone_id = length(regexall("^[a-z]{2}-", element(var.azs, count.index))) == 0 ? element(var.azs, count.index) : null
+ availability_zone = length(regexall("^[a-z]{2}-", element(var.database_subnet_azs, count.index))) > 0 ? element(var.database_subnet_azs, count.index) : null
+ availability_zone_id = length(regexall("^[a-z]{2}-", element(var.database_subnet_azs, count.index))) == 0 ? element(var.database_subnet_azs, count.index) : null
tags = merge(
{
"Name" = format(
"%s-${var.database_subnet_suffix}-%s",
var.name,
- element(var.azs, count.index),
+ element(var.database_subnet_azs, count.index),
)
},
var.tags,
@@ -233,7 +254,7 @@ resource "aws_subnet" "database" {
}
resource "aws_db_subnet_group" "database" {
- count = local.existing_database_subnets == false && length(var.subnets["database"]) > 0 ? 1 : 0
+ count = local.existing_database_subnets == false ? local.create_subnets ? contains(keys(var.subnets), "database") ? length(var.subnets["database"]) > 0 ? 1 : 0 : 0 : 0 : 0
name = lower(var.name)
description = "Database subnet group for ${var.name}"
@@ -247,8 +268,30 @@ resource "aws_db_subnet_group" "database" {
)
}
+#################
+# Control Plane subnet
+#################
+resource "aws_subnet" "control_plane" {
+ count = local.existing_control_plane_subnets ? 0 : length(var.subnets["control_plane"])
+ vpc_id = local.vpc_id
+ cidr_block = element(var.subnets["control_plane"], count.index)
+ availability_zone = length(regexall("^[a-z]{2}-", element(var.control_plane_subnet_azs, count.index))) > 0 ? element(var.control_plane_subnet_azs, count.index) : null
+ availability_zone_id = length(regexall("^[a-z]{2}-", element(var.control_plane_subnet_azs, count.index))) == 0 ? element(var.control_plane_subnet_azs, count.index) : null
+
+ tags = merge(
+ {
+ "Name" = format(
+ "%s-${var.control_plane_subnet_suffix}-%s",
+ var.name,
+ element(var.control_plane_subnet_azs, count.index),
+ )
+ },
+ var.tags,
+ )
+}
+
resource "aws_eip" "nat" {
- count = var.existing_nat_id == null ? 1 : 0
+ count = var.existing_nat_id == null ? local.create_nat_gateway ? 1 : 0 : 0
domain = "vpc"
@@ -257,7 +300,7 @@ resource "aws_eip" "nat" {
"Name" = format(
"%s-%s",
var.name,
- element(var.azs, count.index),
+ element(var.public_subnet_azs, count.index),
)
},
var.tags,
@@ -270,7 +313,7 @@ data "aws_nat_gateway" "nat_gateway" {
}
resource "aws_nat_gateway" "nat_gateway" {
- count = var.existing_nat_id == null ? 1 : 0
+ count = var.existing_nat_id == null ? local.create_nat_gateway ? 1 : 0 : 0
allocation_id = element(aws_eip.nat[*].id, 0)
subnet_id = local.existing_public_subnets ? element(data.aws_subnet.public[*].id, 0) : element(aws_subnet.public[*].id, 0)
@@ -280,7 +323,7 @@ resource "aws_nat_gateway" "nat_gateway" {
"Name" = format(
"%s-%s",
var.name,
- element(var.azs, 0),
+ element(var.public_subnet_azs, 0),
)
},
var.tags,
@@ -290,7 +333,7 @@ resource "aws_nat_gateway" "nat_gateway" {
}
resource "aws_route" "private_nat_gateway" {
- count = var.existing_nat_id == null ? 1 : 0
+ count = var.existing_nat_id == null ? local.create_nat_gateway ? 1 : 0 : 0
route_table_id = element(aws_route_table.private[*].id, count.index)
destination_cidr_block = "0.0.0.0/0"
diff --git a/modules/aws_vpc/outputs.tf b/modules/aws_vpc/outputs.tf
index 651479b3..2a7c8f6a 100644
--- a/modules/aws_vpc/outputs.tf
+++ b/modules/aws_vpc/outputs.tf
@@ -42,12 +42,17 @@ output "private_subnet_cidrs" {
output "database_subnets" {
description = "List of IDs of database subnets"
- value = local.existing_database_subnets ? data.aws_subnet.database[*].id : aws_subnet.database[*].id
+ value = local.existing_database_subnets ? data.aws_subnet.database[*].id : local.database_subnets[*].id
+}
+
+output "control_plane_subnets" {
+ description = "List of IDs of control plane subnets"
+ value = local.existing_control_plane_subnets ? data.aws_subnet.control_plane[*].id : local.control_plane_subnets[*].id
}
output "nat_public_ips" {
description = "List of public Elastic IPs created for AWS NAT Gateway"
- value = var.existing_nat_id == null ? aws_eip.nat[*].public_ip : data.aws_nat_gateway.nat_gateway[*].public_ip
+ value = var.existing_nat_id == null ? local.create_nat_gateway ? aws_eip.nat[*].public_ip : null : data.aws_nat_gateway.nat_gateway[*].public_ip
}
output "public_route_table_ids" {
@@ -64,3 +69,13 @@ output "vpc_cidr" {
description = "CIDR block of VPC"
value = var.vpc_id == null ? var.cidr : data.aws_vpc.vpc[0].cidr_block
}
+
+output "byon_scenario" {
+ description = "The BYO networking configuration (0, 1, 2, or 3) determined by the set of networking input values configured"
+ value = local.byon_scenario
+}
+
+output "create_nat_gateway" {
+ description = "The networking configuration will create a NAT gateway"
+ value = local.create_nat_gateway
+}
diff --git a/modules/aws_vpc/variables.tf b/modules/aws_vpc/variables.tf
index 3e035270..4ab3cb4b 100644
--- a/modules/aws_vpc/variables.tf
+++ b/modules/aws_vpc/variables.tf
@@ -1,20 +1,40 @@
# Copyright © 2021-2023, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
-variable "azs" {
- description = "A list of availability zones names or ids in the region"
+variable "public_subnet_azs" {
+ description = "A list of availability zones names or ids in the region for creating the public subnets"
+ type = list(string)
+ default = []
+}
+
+variable "private_subnet_azs" {
+ description = "A list of availability zones names or ids in the region for creating the private subnets"
+ type = list(string)
+ default = []
+}
+
+variable "control_plane_subnet_azs" {
+ description = "A list of availability zones names or ids in the region for creating the control plane subnets"
+ type = list(string)
+ default = []
+}
+
+variable "database_subnet_azs" {
+ description = "A list of availability zones names or ids in the region for creating the database subnets"
type = list(string)
default = []
}
variable "vpc_id" {
description = "Existing vpc id"
+ type = string
default = null
}
variable "name" {
- type = string
- default = null
+ description = "Prefix used when creating VPC resources"
+ type = string
+ default = null
}
variable "cidr" {
@@ -39,18 +59,6 @@ variable "existing_nat_id" {
description = "Pre-existing VPC NAT Gateway id"
}
-variable "enable_nat_gateway" {
- description = "Should be true if you want to provision NAT Gateways for each of your private networks"
- type = bool
- default = true
-}
-
-variable "single_nat_gateway" {
- description = "Should be true if you want to provision a single shared NAT Gateway across all of your private networks"
- type = bool
- default = true
-}
-
variable "enable_dns_hostnames" {
description = "Should be true to enable DNS hostnames in the VPC"
type = bool
@@ -63,7 +71,6 @@ variable "enable_dns_support" {
default = true
}
-
variable "tags" {
description = "The tags to associate with your network and subnets."
type = map(string)
@@ -100,6 +107,12 @@ variable "database_subnet_suffix" {
default = "db"
}
+variable "control_plane_subnet_suffix" {
+ description = "Suffix to append to control plane subnets name"
+ type = string
+ default = "control-plane"
+}
+
variable "map_public_ip_on_launch" {
description = "Should be false if you do not want to auto-assign public IP on launch"
type = bool
@@ -108,8 +121,23 @@ variable "map_public_ip_on_launch" {
variable "vpc_private_endpoints" {
description = "Endpoints needed for private cluster"
- type = list(string)
- default = ["ec2", "ecr.api", "ecr.dkr", "s3", "logs", "sts", "elasticloadbalancing", "autoscaling"]
+ type = map(string)
+ default = {
+ "ec2" = "Interface",
+ "ecr.api" = "Interface",
+ "ecr.dkr" = "Interface",
+ "s3" = "Gateway",
+ "logs" = "Interface",
+ "sts" = "Interface",
+ "elasticloadbalancing" = "Interface",
+ "autoscaling" = "Interface"
+ }
+}
+
+variable "vpc_private_endpoints_enabled" {
+ description = "Enable the creation of vpc private endpoint resources"
+ type = bool
+ default = true
}
variable "region" {
@@ -118,6 +146,21 @@ variable "region" {
}
variable "security_group_id" {
- description = "Security Group ID"
+ description = "Security Group ID local variable value"
+ type = string
+}
+
+variable "raw_sec_group_id" {
+ description = "Security Group ID input variable value"
+ type = string
+}
+
+variable "cluster_security_group_id" {
+ description = "Cluster Security Group ID input variable value"
+ type = string
+}
+
+variable "workers_security_group_id" {
+ description = "Workers Security Group ID input variable value"
type = string
}
diff --git a/modules/kubeconfig/main.tf b/modules/kubeconfig/main.tf
index 99b75e13..bc736524 100644
--- a/modules/kubeconfig/main.tf
+++ b/modules/kubeconfig/main.tf
@@ -6,11 +6,11 @@ terraform {
required_providers {
kubernetes = {
source = "hashicorp/kubernetes"
- version = "~> 2.20"
+ version = "2.23.0"
}
local = {
source = "hashicorp/local"
- version = "~> 2.4"
+ version = "2.4.0"
}
}
}
@@ -42,6 +42,10 @@ locals {
}
+data "aws_security_group" "selected" {
+ id = var.sg_id
+}
+
data "kubernetes_secret" "sa_secret" {
count = var.create_static_kubeconfig ? 1 : 0
metadata {
@@ -61,8 +65,12 @@ resource "kubernetes_secret" "sa_secret" {
"kubernetes.io/service-account.name" = local.service_account_name
}
}
- type = "kubernetes.io/service-account-token"
- depends_on = [kubernetes_service_account.kubernetes_sa]
+ type = "kubernetes.io/service-account-token"
+
+ depends_on = [
+ kubernetes_service_account.kubernetes_sa,
+ data.aws_security_group.selected,
+ ]
}
# Starting K8s v1.24+ hashicorp/terraform-provider-kubernetes issues warning message:
@@ -90,6 +98,10 @@ resource "kubernetes_cluster_role_binding" "kubernetes_crb" {
name = local.service_account_name
namespace = var.namespace
}
+
+ depends_on = [
+ data.aws_security_group.selected,
+ ]
}
# kube config file generation
diff --git a/modules/kubeconfig/output.tf b/modules/kubeconfig/outputs.tf
similarity index 100%
rename from modules/kubeconfig/output.tf
rename to modules/kubeconfig/outputs.tf
diff --git a/modules/kubeconfig/variables.tf b/modules/kubeconfig/variables.tf
index 71a2dbda..c5e0ca27 100644
--- a/modules/kubeconfig/variables.tf
+++ b/modules/kubeconfig/variables.tf
@@ -13,6 +13,7 @@ variable "namespace" {
}
variable "region" {
+ description = "AWS Region this cluster was provisioned in"
type = string
default = null
}
@@ -24,17 +25,26 @@ variable "create_static_kubeconfig" {
}
variable "path" {
+ description = "Path to output the kubeconfig file"
type = string
}
variable "cluster_name" {
+ description = "Kubernetes cluster name"
type = string
}
variable "endpoint" {
+ description = "Kubernetes cluster endpoint"
type = string
}
variable "ca_crt" {
+ description = "Kubernetes CA certificate"
+ type = string
+}
+
+variable "sg_id" {
+ description = "Security group ID"
type = string
}
diff --git a/outputs.tf b/outputs.tf
index 5a874847..e3a95969 100755
--- a/outputs.tf
+++ b/outputs.tf
@@ -104,7 +104,7 @@ output "postgres_servers" {
}
output "nat_ip" {
- value = module.vpc.nat_public_ips[0]
+ value = module.vpc.create_nat_gateway ? module.vpc.nat_public_ips[0] : null
}
output "prefix" {
@@ -145,7 +145,7 @@ output "ebs_csi_account" {
}
output "k8s_version" {
- value = data.aws_eks_cluster.cluster.version
+ value = module.eks.cluster_version
}
output "aws_shared_credentials_file" {
@@ -170,7 +170,8 @@ output "storage_type_backend" {
condition = (var.storage_type == "standard" && var.storage_type_backend == "nfs"
|| var.storage_type == "ha" && var.storage_type_backend == "nfs"
|| var.storage_type == "ha" && var.storage_type_backend == "efs"
- || var.storage_type == "ha" && var.storage_type_backend == "ontap")
+ || var.storage_type == "ha" && var.storage_type_backend == "ontap"
+ || var.storage_type == "none" && var.storage_type_backend == "none")
error_message = "nfs is the only valid storage_type_backend when storage_type == 'standard'"
}
}
@@ -179,3 +180,27 @@ output "aws_fsx_ontap_fsxadmin_password" {
value = (local.storage_type_backend == "ontap" ? var.aws_fsx_ontap_fsxadmin_password : null)
sensitive = true
}
+
+output "byo_network_scenario" {
+ value = module.vpc.byon_scenario
+}
+
+output "validate_subnet_azs" {
+ # validation, no output value needed
+ value = null
+ precondition {
+ # Validation Notes:
+ # Either the user does not define subnet_azs and it defaults to {}, in which case the whole map will be populated
+ # If the user does not define a specific key, that is allowed and we will populate the az list for that subnet
+  # Lastly, if the user does define a specific subnet_azs key, its list of AZs must be at least as long as the list of CIDRs under the matching key in the subnets variable
+ condition = (var.subnet_azs == {} ||
+ (
+ (length(lookup(var.subnet_azs, "private", [])) == 0 || length(lookup(var.subnet_azs, "private", [])) >= length(lookup(var.subnets, "private", []))) &&
+ (length(lookup(var.subnet_azs, "control_plane", [])) == 0 || length(lookup(var.subnet_azs, "control_plane", [])) >= length(lookup(var.subnets, "control_plane", []))) &&
+ (length(lookup(var.subnet_azs, "public", [])) == 0 || length(lookup(var.subnet_azs, "public", [])) >= length(lookup(var.subnets, "public", []))) &&
+ (length(lookup(var.subnet_azs, "database", [])) == 0 || length(lookup(var.subnet_azs, "database", [])) >= length(lookup(var.subnets, "database", [])))
+ )
+ )
+    error_message = "Each key in subnet_azs must map to a list of AZs at least as long as the list of CIDRs under the equivalent key in the subnets variable."
+ }
+}
diff --git a/security.tf b/security.tf
index dec8e74c..d1b7b56f 100644
--- a/security.tf
+++ b/security.tf
@@ -7,11 +7,12 @@ data "aws_security_group" "sg" {
}
# Security Groups - https://www.terraform.io/docs/providers/aws/r/security_group.html
-resource "aws_security_group" "sg" {
- count = var.security_group_id == null ? 1 : 0
+resource "aws_security_group" "sg_a" {
+ count = var.security_group_id == null && var.vpc_private_endpoints_enabled == false ? 1 : 0
name = "${var.prefix}-sg"
vpc_id = module.vpc.vpc_id
+ description = "Auxiliary security group associated with RDS ENIs and VPC Endpoint ENIs as well as Jump/NFS VM ENIs when they have public IPs"
egress {
description = "Allow all outbound traffic."
from_port = 0
@@ -22,6 +23,30 @@ resource "aws_security_group" "sg" {
tags = merge(local.tags, { "Name" : "${var.prefix}-sg" })
}
+# Security Groups - https://www.terraform.io/docs/providers/aws/r/security_group.html
+resource "aws_security_group" "sg_b" {
+ count = var.security_group_id == null && var.vpc_private_endpoints_enabled ? 1 : 0
+ name = "${var.prefix}-sg"
+ vpc_id = module.vpc.vpc_id
+
+ description = "Auxiliary security group associated with RDS ENIs and VPC Endpoint ENIs as well as Jump/NFS VM ENIs when they have public IPs"
+ egress {
+ description = "Allow all outbound traffic."
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ ingress {
+ description = "Allow tcp port 443 ingress to all AWS Services targeted by the VPC endpoints"
+ from_port = 443
+ to_port = 443
+ protocol = "tcp"
+ cidr_blocks = local.vpc_endpoint_private_access_cidrs
+ }
+ tags = merge(local.tags, { "Name" : "${var.prefix}-sg" })
+}
+
resource "aws_security_group_rule" "vms" {
count = (length(local.vm_public_access_cidrs) > 0
&& var.security_group_id == null
@@ -93,6 +118,13 @@ resource "aws_security_group" "cluster_security_group" {
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
+ ingress {
+ description = "Allow additional HTTPS/443 ingress to private EKS cluster API server endpoint per var.cluster_endpoint_private_access_cidrs"
+ from_port = 443
+ to_port = 443
+ protocol = "tcp"
+ cidr_blocks = local.cluster_endpoint_private_access_cidrs
+ }
}
@@ -105,11 +137,10 @@ resource "aws_security_group_rule" "cluster_ingress" {
from_port = 443
to_port = 443
protocol = "tcp"
- source_security_group_id = aws_security_group.workers_security_group[0].id
+ source_security_group_id = local.workers_security_group_id
security_group_id = local.cluster_security_group_id
}
-
resource "aws_security_group" "workers_security_group" {
name = "${var.prefix}-eks_worker_sg"
vpc_id = module.vpc.vpc_id
@@ -144,7 +175,7 @@ resource "aws_security_group_rule" "worker_self" {
count = var.workers_security_group_id == null ? 1 : 0
type = "ingress"
- description = "Allow node to comunicate with each other."
+ description = "Allow node to communicate with each other."
from_port = 0
protocol = "-1"
self = true
@@ -157,7 +188,7 @@ resource "aws_security_group_rule" "worker_cluster_api" {
count = var.workers_security_group_id == null ? 1 : 0
type = "ingress"
- description = "Allow workers pods to receive communication from the cluster control plane."
+ description = "Allow worker pods to receive communication from the cluster control plane."
from_port = 1025
protocol = "tcp"
source_security_group_id = local.cluster_security_group_id
@@ -177,3 +208,22 @@ resource "aws_security_group_rule" "worker_cluster_api_443" {
to_port = 443
security_group_id = aws_security_group.workers_security_group[0].id
}
+
+
+resource "aws_security_group_rule" "vm_private_access_22" {
+
+ count = (length(local.vm_private_access_cidrs) > 0
+ && var.workers_security_group_id == null
+ && ((var.create_jump_public_ip == false && var.create_jump_vm)
+ || (var.create_nfs_public_ip == false && var.storage_type == "standard")
+ )
+ ? 1 : 0
+ )
+ type = "ingress"
+ description = "Allow SSH to a private IP based Jump VM per var.vm_private_access_cidrs. Required for DAC baseline client VM."
+ from_port = 22
+ to_port = 22
+ protocol = "tcp"
+ cidr_blocks = local.vm_private_access_cidrs
+ security_group_id = aws_security_group.workers_security_group[0].id
+}
diff --git a/variables.tf b/variables.tf
index 574d7465..d8544bed 100644
--- a/variables.tf
+++ b/variables.tf
@@ -61,9 +61,15 @@ variable "iac_tooling" {
default = "terraform"
}
-## Public Access
+## Public & Private Access
variable "default_public_access_cidrs" {
- description = "List of CIDRs to access created resources."
+ description = "List of CIDRs to access created resources - Public."
+ type = list(string)
+ default = null
+}
+
+variable "default_private_access_cidrs" {
+ description = "List of CIDRs to access created resources - Private."
type = list(string)
default = null
}
@@ -80,8 +86,20 @@ variable "cluster_endpoint_private_access_cidrs" {
default = null
}
+variable "vpc_endpoint_private_access_cidrs" {
+ description = "List of CIDRs to access VPC endpoints - Private."
+ type = list(string)
+ default = null
+}
+
variable "vm_public_access_cidrs" {
- description = "List of CIDRs to access jump VM or NFS VM."
+ description = "List of CIDRs to access jump VM or NFS VM - Public."
+ type = list(string)
+ default = null
+}
+
+variable "vm_private_access_cidrs" {
+ description = "List of CIDRs to access jump VM or NFS VM - Private."
type = list(string)
default = null
}
@@ -338,7 +356,8 @@ variable "subnet_ids" {
# Example:
# subnet_ids = { # only needed if using pre-existing subnets
# "public" : ["existing-public-subnet-id1", "existing-public-subnet-id2"],
- # "private" : ["existing-private-subnet-id1", "existing-private-subnet-id2"],
+ # "private" : ["existing-private-subnet-id1"],
+ # "control_plane" : ["existing-control-plane-subnet-id1", "existing-control-plane-subnet-id2"],
# "database" : ["existing-database-subnet-id1", "existing-database-subnet-id2"] # only when 'create_postgres=true'
# }
}
@@ -353,12 +372,25 @@ variable "subnets" {
description = "Subnets to be created and their settings - This variable is ignored when `subnet_ids` is set (AKA bring your own subnets)."
type = map(list(string))
default = {
- "private" : ["192.168.0.0/18", "192.168.64.0/18"],
+ "private" : ["192.168.0.0/18"],
+ "control_plane" : ["192.168.130.0/28", "192.168.130.16/28"], # AWS recommends at least 16 IP addresses per subnet
"public" : ["192.168.129.0/25", "192.168.129.128/25"],
"database" : ["192.168.128.0/25", "192.168.128.128/25"]
}
}
+variable "subnet_azs" {
+ description = "AZs you want the subnets to created in - This variable is ignored when `subnet_ids` is set (AKA bring your own subnets)."
+ type = map(list(string))
+ default = {}
+ nullable = false
+
+ # We only support configuring the AZs for the public, private, control_plane, and database subnet
+ validation {
+ condition = var.subnet_azs == {} || alltrue([for subnet in keys(var.subnet_azs) : contains(["public", "private", "control_plane", "database"], subnet)])
+ error_message = "ERROR: public, private, control_plane, and database are the only keys allowed in the subnet_azs map"
+ }
+}
variable "security_group_id" {
description = "Pre-existing Security Group id. Leave blank to have one created."
type = string
@@ -599,8 +631,23 @@ variable "cluster_api_mode" {
variable "vpc_private_endpoints" { # tflint-ignore: terraform_unused_declarations
description = "Endpoints needed for private cluster."
- type = list(string)
- default = ["ec2", "ecr.api", "ecr.dkr", "s3", "logs", "sts", "elasticloadbalancing", "autoscaling"]
+ type = map(string)
+ default = {
+ "ec2" = "Interface",
+ "ecr.api" = "Interface",
+ "ecr.dkr" = "Interface",
+ "s3" = "Gateway",
+ "logs" = "Interface",
+ "sts" = "Interface",
+ "elasticloadbalancing" = "Interface",
+ "autoscaling" = "Interface"
+ }
+}
+
+variable "vpc_private_endpoints_enabled" {
+ description = "Enable the creation of vpc private endpoint resources"
+ type = bool
+ default = true
}
variable "cluster_node_pool_mode" {
diff --git a/versions.tf b/versions.tf
index 17a44599..b593c28f 100644
--- a/versions.tf
+++ b/versions.tf
@@ -6,7 +6,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = "5.4.0"
+ version = "5.25.0"
}
random = {
source = "hashicorp/random"
@@ -26,7 +26,7 @@ terraform {
}
kubernetes = {
source = "hashicorp/kubernetes"
- version = "2.20.0"
+ version = "2.23.0"
}
tls = {
source = "hashicorp/tls"