From 0ece330399f7383ad4649107f78bad54e26f29de Mon Sep 17 00:00:00 2001 From: chaitu6022 Date: Mon, 28 Jun 2021 08:44:51 -0700 Subject: [PATCH 1/3] AWS Load Balancer Ingress controller --- aws/README.md | 641 ++++++++++++++++++++----------- aws/modules/eks/variables.tf | 2 +- aws/modules/vpc/main.tf | 9 +- aws/scripts/aws-auth-cm.yaml | 32 ++ aws/scripts/iam_policy.json | 207 ++++++++++ aws/scripts/manifest.yaml | 44 +++ aws/scripts/oicd_policy.json | 54 +++ aws/scripts/policy_document.json | 2 +- gcp/modules/iam/outputs.tf | 1 - test/grafana-ingress.yml | 18 + test/ingress.yml | 38 ++ test/nodeport.yaml | 29 ++ test/reaper-ingress.yaml | 12 +- test/reaper-nodeport.yaml | 14 - 14 files changed, 853 insertions(+), 250 deletions(-) create mode 100644 aws/scripts/aws-auth-cm.yaml create mode 100644 aws/scripts/iam_policy.json create mode 100644 aws/scripts/manifest.yaml create mode 100644 aws/scripts/oicd_policy.json create mode 100644 test/grafana-ingress.yml create mode 100644 test/ingress.yml create mode 100644 test/nodeport.yaml delete mode 100644 test/reaper-nodeport.yaml diff --git a/aws/README.md b/aws/README.md index dbe23d4..b9b9dfb 100644 --- a/aws/README.md +++ b/aws/README.md @@ -1,225 +1,416 @@ -# K8ssandra AWS Terraform Module - -## What is Elastic Kubernetes Service(EKS)? -[Amazon Elastic Kubernetes Service](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html) is a managed Kubernetes service on AWS, it gives user flexibility to run and scale Kubernetes applications in the AWS cloud. - - -## Terraform Resources created -* EKS cluster -* EKS node group -* IAM roles -* IAM role policy -* IAM role policy attachment -* S3 Bucket -* S3 Bucket public access block -* Virtual Private Cloud -* Subnets -* Security Groups -* Security Group rule -* NAT Gateway -* Internet Gateway -* elastic IP -* Route Table -* Route Table association - -## Project directory Structure -
-aws/
- ├──modules/
- |  ├──s3
- |     ├── main.tf 
- |     └── variables.tf 
- |     └── outputs.tf 
- |     └── README.md 
- |  ├──vpc
- |     ├── main.tf 
- |     └── variables.tf 
- |     └── outputs.tf 
- |     └── README.md 
- |  ├──iam
- |     ├── main.tf 
- |     └── variables.tf 
- |     └── outputs.tf 
- |     └── README.md
- |  ├──eks
- |     ├── main.tf 
- |     └── variables.tf 
- |     └── outputs.tf 
- |     └── README.md
- |
- ├──env
- |   ├── dev.tf
- |    ../modules/vpc
- |    ../modules/iam
- |    ../modules/eks
- |    ../modules/s3
- |  ├── version.tf 
- |  └── variables.tf 
- |  └── outputs.tf
- |  └── README.md
- |
- ├──scripts
- |  ├── apply.sh
- |  └── common.sh
- |  └── delete_bucket.py
- |  └── destroy.sh
- |  └── init.sh
- |  └── make_bucket.py
- |  └── plan.sh
- |  └── README.md
- └──README.md
-
- -## Prerequisites - -| NAME | Version | -|---------------------|------------| -| Terraform version | 0.14 | -| aws provider | ~>3.0 | -| Helm version | v3.5.3 | -| AWS CLI | version2 | -| boto 3 | | -| kubectl | 1.17.17 | -| python | 3 | -|aws-iam-authenticator| 0.5.2 | - -### Backend - * Terraform uses persistent state data to keep track of the resources it manages. Since it needs the state in order to know which real-world infrastructure objects correspond to the resources in a configuration, everyone working with a given collection of infrastructure resources must be able to access the same state data. - * Terraform backend configuration: - [Configuring your backend in aws s3](https://www.terraform.io/docs/language/settings/backends/s3.html) - * Terraform state - [How Terraform state works](https://www.terraform.io/docs/language/state/index.html) - -Following Backend template is to store your backend remotely in s3 bucket, if you are planning to use remote backend copy the following sample template to [./env](#./env) folder and update the backend file with your bucket name, key and your bucket region. - -If you don't want to use the remote backend, you can use the local local directory to store the state files. `terraform init` will generate a state file and it will be stored in your local directory under [./env/.terraform](./env). - -Sample template to configure your backend in s3 bucket: -``` - terraform { - backend "s3" { - bucket = "" - key = "" - region = "" - } - } -``` - -### Access - -* Access to an existing AWS cloud as a owner or a developer. following permissions are required - * Managed policies( These policies are Managed by the AWS, you can locate them in the attach existing policies section). - * AmazonEKSClusterPolicy - * AmazonEKSWorkerNodePolicy - * AmazonEKSServicePolicy - * AmazonEKSVPCResourceController - * Custom policy document located here.[policy_document](#./scripts/policy_document.json). In this JSON file there are three different policy documents. - * [IAM-Developer](#./scripts/policy_document.json/policy-for-IAM) Policy to manage IAM access to the user. - * [AutoScaling Group](#./scripts/policy_document.json/Policy-for-AutoScaling-Group) Policy to manage AutoScaling Group access permissions. - * [EC2&S3_policy](#./scripts/policy_document.json/policy-for-S3-and-EC2) Policy to manage EC2 and S3 permissions. - you will need to create three different policies, we cannot able to add all the policies in the single document, it will exceeds the resource limit on the policy document. - -### Tools - -* Bash and common command line tools (Make, etc.) -* [Terraform v0.14.0+](https://www.terraform.io/downloads.html) -* [AWS CLI v2](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -* [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) that matches the latest generally-available EKS cluster version. - -#### Install Terraform - -Terraform is used to automate the manipulation of cloud infrastructure. Its [Terraform installation instructions](https://www.terraform.io/intro/getting-started/install.html) are also available online. - -#### AWS IAM authenticator - -Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM authenticator for Kubernetes. Follow the instructions to install [aws-iam-authenticator installation instructions](https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html). 
- -#### Install kubectl -Kubernetes uses a command-line utility called `kubectl` for communicating with the cluster API server. The kubectl binary is available in many operating system package managers, and this option is often much easier than a manual download and install process. Follow the instructions to install [kubectl installation instructions](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html). - -### Configure AWSCLI -Run `aws configure` command to configure your AWS cli, This Terraform module utilizes AWS CLI v2, if you have older versions of AWS CLI use the following instructions to uninstall older versions. - -* [Uninstall AWS cli v1](https://docs.aws.amazon.com/cli/latest/userguide/install-linux.html) -* [Install AWS cli v2](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) - -```console -aws configure -``` -Enter `access key`, `secret access key` default `region`. - -## Test this project locally - -Export the following terraform environment variables(TFVARS) for terraform to create the resources. if you don't want to export any values, you skip this part and proceed further. - -```console - -# Environment name, eg. "dev" -# bash, zsh -export TF_VAR_environment=dev - -#fish -set -x TF_VAR_environment dev - -# Kubernetes cluster name, eg. "k8ssandra" -# bash, zsh -export TF_VAR_name=k8ssandra - -#fish -set -x TF_VAR_name k8ssandra - -# Resource Owner name, eg. "k8ssandra" -# bash, zsh -export TF_VAR_resource_owner=k8ssandra - -#fish -set -x TF_VAR_resource_owner k8ssandra - - -# AWS region name, eg. "us-east-1" -# bash, zsh -export TF_VAR_region=us-east-1 - -#fish -set -x TF_VAR_region us-east-1 - -``` - -Important: Initialize the terraform modules and downloads required providers. - -```console -cd env/ - -terraform init -``` - -Run the following commands to plan and apply changes to your infrastructure. - -If you miss export any values or don't want to import any values, by running the following commands they are going prompt for required values in the `variable.tf`. you can enter those values through command-line. - -```console -terraform plan - -terraform apply -``` - -To destroy the resource, use the following instructions: - -It is important to export or pass the right values when destroying the resources on a local workspace. Make sure you exported(TF_VAR) or enter right environment variables. - -Verify the resources before you destroy Used the following command. - -```console -terraform plan -destroy -``` - -Run the following command to destroy all the resources in your local workspace. - -```console -terraform destroy -``` -or - -```console -terraform destroy -auto-approve -``` +# K8ssandra AWS Terraform Module + +## What is Elastic Kubernetes Service(EKS)? +[Amazon Elastic Kubernetes Service](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html) is a managed Kubernetes service on AWS, it gives user flexibility to run and scale Kubernetes applications in the AWS cloud. + + +## Terraform Resources created +* EKS cluster +* EKS node group +* IAM roles +* IAM role policy +* IAM role policy attachment +* S3 Bucket +* S3 Bucket public access block +* Virtual Private Cloud +* Subnets +* Security Groups +* Security Group rule +* NAT Gateway +* Internet Gateway +* elastic IP +* Route Table +* Route Table association + +## Project directory Structure +
+aws/
+ ├──modules/
+ |  ├──s3
+ |     ├── main.tf 
+ |     └── variables.tf 
+ |     └── outputs.tf 
+ |     └── README.md 
+ |  ├──vpc
+ |     ├── main.tf 
+ |     └── variables.tf 
+ |     └── outputs.tf 
+ |     └── README.md 
+ |  ├──iam
+ |     ├── main.tf 
+ |     └── variables.tf 
+ |     └── outputs.tf 
+ |     └── README.md
+ |  ├──eks
+ |     ├── main.tf 
+ |     └── variables.tf 
+ |     └── outputs.tf 
+ |     └── README.md
+ |
+ ├──env
+ |  ├── dev.tf (module calls to ../modules/vpc, ../modules/iam, ../modules/eks, ../modules/s3)
+ |  ├── version.tf 
+ |  ├── variables.tf 
+ |  ├── outputs.tf
+ |  └── README.md
+ |
+ ├──scripts
+ |  ├── apply.sh
+ |  └── common.sh
+ |  └── delete_bucket.py
+ |  └── destroy.sh
+ |  └── init.sh
+ |  └── make_bucket.py
+ |  └── plan.sh
+ |  └── README.md
+ └──README.md
+
+
+## Prerequisites
+
+| NAME                  | Version   |
+|-----------------------|-----------|
+| Terraform version     | 0.14      |
+| aws provider          | ~>3.0     |
+| Helm version          | v3.5.3    |
+| AWS CLI               | version 2 |
+| boto3                 |           |
+| kubectl               | 1.17.17   |
+| python                | 3         |
+| aws-iam-authenticator | 0.5.2     |
+
+### Backend
+  * Terraform uses persistent state data to keep track of the resources it manages. Because the state is what maps real-world infrastructure objects to the resources in a configuration, everyone working with a given collection of infrastructure resources must be able to access the same state data.
+  * Terraform backend configuration:
+    [Configuring your backend in aws s3](https://www.terraform.io/docs/language/settings/backends/s3.html)
+  * Terraform state:
+    [How Terraform state works](https://www.terraform.io/docs/language/state/index.html)
+
+The following backend template stores your state remotely in an S3 bucket. If you plan to use a remote backend, copy the sample template below into the [./env](#./env) folder and update the backend file with your bucket name, key, and bucket region.
+
+If you don't want to use a remote backend, you can store the state files in the local directory instead. `terraform init` will generate a state file and store it in your local directory under [./env/.terraform](./env).
+
+Sample template to configure your backend in an S3 bucket:
+```
+ terraform {
+   backend "s3" {
+     bucket = ""
+     key    = ""
+     region = ""
+   }
+ }
+```
+
+### Access
+
+* Access to an existing AWS account as an owner or a developer. The following permissions are required:
+  * Managed policies (these are managed by AWS; you can locate them in the "attach existing policies" section):
+    * AmazonEKSClusterPolicy
+    * AmazonEKSWorkerNodePolicy
+    * AmazonEKSServicePolicy
+    * AmazonEKSVPCResourceController
+  * A custom policy document, located here: [policy_document](#./scripts/policy_document.json). This JSON file contains three different policy documents:
+    * [IAM-Developer](#./scripts/policy_document.json/policy-for-IAM) Policy to manage IAM access for the user.
+    * [AutoScaling Group](#./scripts/policy_document.json/Policy-for-AutoScaling-Group) Policy to manage Auto Scaling group permissions.
+    * [EC2&S3_policy](#./scripts/policy_document.json/policy-for-S3-and-EC2) Policy to manage EC2 and S3 permissions.
+
+    You will need to create three separate policies; they cannot all be combined into a single document because that would exceed the size limit for a policy document.
+
+### Tools
+
+* Bash and common command line tools (Make, etc.)
+* [Terraform v0.14.0+](https://www.terraform.io/downloads.html)
+* [AWS CLI v2](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html)
+* [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) that matches the latest generally-available EKS cluster version.
+
+#### Install Terraform
+
+Terraform is used to automate the provisioning of cloud infrastructure. The [Terraform installation instructions](https://www.terraform.io/intro/getting-started/install.html) are available online.
+
+#### AWS IAM authenticator
+
+Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM authenticator for Kubernetes. Follow the [aws-iam-authenticator installation instructions](https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html) to install it.
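+
+A quick way to confirm that the authenticator is installed and on your `PATH` is to print its version (the exact output format may vary between releases):
+
+```console
+aws-iam-authenticator version
+```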
+
+#### Install kubectl
+Kubernetes uses a command-line utility called `kubectl` for communicating with the cluster API server. The kubectl binary is available in many operating system package managers, and this option is often much easier than a manual download and install. Follow the [kubectl installation instructions](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html).
+
+### Configure the AWS CLI
+Run the `aws configure` command to configure your AWS CLI. This Terraform module requires AWS CLI v2; if you have an older version of the AWS CLI, use the following instructions to uninstall it and install v2.
+
+* [Uninstall AWS cli v1](https://docs.aws.amazon.com/cli/latest/userguide/install-linux.html)
+* [Install AWS cli v2](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html)
+
+```console
+aws configure
+```
+Enter your `access key`, `secret access key`, and default `region` when prompted.
+
+## Test this project locally
+
+Export the following Terraform environment variables (TF_VARs) for Terraform to use when creating the resources. If you don't want to export any values, you can skip this step and proceed; Terraform will prompt for the values instead.
+
+```console
+
+# Environment name, eg. "dev"
+# bash, zsh
+export TF_VAR_environment=dev
+
+#fish
+set -x TF_VAR_environment dev
+
+# Kubernetes cluster name, eg. "k8ssandra"
+# bash, zsh
+export TF_VAR_name=k8ssandra
+
+#fish
+set -x TF_VAR_name k8ssandra
+
+# Resource Owner name, eg. "k8ssandra"
+# bash, zsh
+export TF_VAR_resource_owner=k8ssandra
+
+#fish
+set -x TF_VAR_resource_owner k8ssandra
+
+# AWS region name, eg. "us-east-1"
+# bash, zsh
+export TF_VAR_region=us-east-1
+
+#fish
+set -x TF_VAR_region us-east-1
+
+```
+
+Important: initialize Terraform to download the required modules and providers.
+
+```console
+cd env/
+
+terraform init
+```
+
+Run the following commands to plan and apply changes to your infrastructure.
+
+If you missed exporting any values, or chose not to, the following commands will prompt for the required values declared in `variables.tf`; you can enter them on the command line.
+
+```console
+terraform plan
+
+terraform apply
+```
+
+**To destroy the resources, use the following instructions:**
+
+It is important to export or pass the right values when destroying the resources from a local workspace. Make sure you export (TF_VAR) or enter the correct environment variables.
+
+Verify the resources to be destroyed with the following command:
+
+```console
+terraform plan -destroy
+```
+
+Run the following command to destroy all the resources in your local workspace.
+
+```console
+terraform destroy
+```
+or
+
+```console
+terraform destroy -auto-approve
+```
+
+## Set up the AWS Ingress Controller
+OpenID Connect (OIDC) identity provider (IDP) support allows you to integrate an OIDC identity provider with a new or existing Amazon EKS cluster running Kubernetes version 1.16 or later. The OIDC IDP can be used as an alternative to, or alongside, AWS Identity and Access Management (IAM). With this feature, you can manage user access to your cluster by leveraging your existing identity management life cycle through your OIDC identity provider.
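+
+Before associating a provider, you can check whether your cluster already exposes an OIDC issuer URL and whether an IAM OIDC provider already exists for it. For example (the cluster name below is a placeholder):
+
+```console
+aws eks describe-cluster --name dev-k8ssandra-eks-cluster --query "cluster.identity.oidc.issuer" --output text
+
+aws iam list-open-id-connect-providers
+```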
+
+**Enable the OIDC provider:**
+```console
+eksctl utils associate-iam-oidc-provider --region <region> --cluster <cluster-name> --approve
+```
+Example: eksctl utils associate-iam-oidc-provider --region us-east-1 --cluster dev-k8ssandra-eks-cluster --approve
+
+**Create the IAM policy:**
+Locate the IAM policy document (`iam_policy.json` in the scripts folder), provide its path, and create the policy. If the policy already exists, skip to the next step.
+```console
+aws iam create-policy --policy-name AWSLoadBalancerControllerIAMPolicy --policy-document file://iam_policy.json
+```
+Note the ARN of the IAM policy in the command output and save it for later use.
+
+**Create a service account using the policy ARN created above:**
+
+Get the account number:
+```console
+aws sts get-caller-identity
+```
+Get the ARN of the policy created earlier, for example:
+arn:aws:iam::xxxxxxxxxxxx:policy/AWSLoadBalancerControllerIAMPolicy
+
+Create an IAM role and annotate the Kubernetes service account named aws-load-balancer-controller in the kube-system namespace for the AWS Load Balancer Controller, using eksctl or the AWS Management Console and kubectl.
+
+```console
+eksctl create iamserviceaccount --cluster=<cluster-name> --region=<region> --namespace=kube-system --name=aws-load-balancer-controller --attach-policy-arn=arn:aws:iam::xxxxxxxxxxxx:policy/AWSLoadBalancerControllerIAMPolicy --override-existing-serviceaccounts --approve
+```
+
+Example:
+eksctl create iamserviceaccount --cluster=dev-k8ssandra-eks-cluster --region=us-east-1 --namespace=kube-system --name=aws-load-balancer-controller --attach-policy-arn=arn:aws:iam::xxxxxxxxxxx:policy/AWSLoadBalancerControllerIAMPolicy --override-existing-serviceaccounts --approve
+
+If you currently have the AWS ALB Ingress Controller for Kubernetes installed, uninstall it. The AWS Load Balancer Controller replaces the functionality of the AWS ALB Ingress Controller for Kubernetes.
+
+**Install the AWS Load Balancer Controller using Helm v3 or later:**
+Install the TargetGroupBinding custom resource definitions.
+```console
+kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master"
+```
+
+**Add the eks-charts repository:**
+```console
+helm repo add eks https://aws.github.io/eks-charts
+```
+Update your local repo to make sure that you have the most recent charts:
+```console
+helm repo update
+```
+
+**Install the AWS Load Balancer Controller:**
+Install aws-load-balancer-controller with the following Helm command:
+
+```console
+helm upgrade -i aws-load-balancer-controller eks/aws-load-balancer-controller --set clusterName=<cluster-name> -n kube-system --set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller
+```
+Example:
+helm upgrade -i aws-load-balancer-controller eks/aws-load-balancer-controller --set clusterName=dev-k8ssandra-eks-cluster -n kube-system --set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller
+
+**Check the deployment status:**
+```console
+kubectl get deployment -n kube-system aws-load-balancer-controller
+```
+You should see output similar to the following.
+
+Output:
+NAME                            READY   UP-TO-DATE   AVAILABLE   AGE
+aws-load-balancer-controller    2/2     2            2           84s
+
+**Check the logs:**
+```console
+kubectl logs -n kube-system deployment.apps/aws-load-balancer-controller
+```
+
+### Deploy K8ssandra
+
+Add the following Helm repository if it is not already added:
+```console +helm repo add k8ssandra https://helm.k8ssandra.io/stable +``` + +### Installation +Deploy k8ssasndra by running the following command. +```console +helm install k8ssandra/k8ssandra --set cassandra.cassandraLibDirVolume.storageClass=gp2 + +eg: helm install test k8ssandra/k8ssandra --set cassandra.cassandraLibDirVolume.storageClass=gp2 +``` +After the installation please verify the pods status, it will take about 3-5 minutes provision the resources wait until all the resources are in running state. + +to check the status of the pods run the following command, wait until all the pods and services available. + +```console +kubectl get pods +``` + +### Create Nodeport +locate `nodeport.yaml` file and run the following command to create Nodeport service. +```console +kubectl create -f /nodeport.yaml +``` + +### Create ingress: +```console +cd test +kubectl create -f /ingress.yml +``` +Wait for the load balancers to come available +```console +kubectl get ingress +``` + +Output: +NAME CLASS HOSTS ADDRESS PORTS AGE +ingress * k8s-default-ingress-7cb30059ab-318290427.us-east-1.elb.amazonaws.com 80 38m + +When you create an Amazon EKS cluster, the IAM user or role (such as a federated user that creates the cluster) is automatically granted system:masters permissions in the cluster's RBAC configuration. If you access the Amazon EKS console and your IAM user or role isn't part of the aws-auth ConfigMap, then you can't see your Kubernetes workloads or overview details for the cluster. + +To grant additional AWS users or roles the ability to interact with your cluster, you must edit the aws-auth ConfigMap within Kubernetes. + +### Map the IAM users or roles to the RBAC roles and groups using aws-auth ConfigMap + +**Get the configuration of your AWS CLI user or role:** + +```console +aws sts get-caller-identity +``` +Confirm that the ARN matches the cluster creator or the admin with primary access to configure your cluster. If the ARN doesn't match the cluster creator or admin, then contact the cluster creator or admin to update the aws-auth ConfigMap. + +The group name in the [manifest](../scripts/manifest.yaml) file is `eks-console-dashboard-full-access-group`. This is the group that your IAM user or role must be mapped to in the aws-auth ConfigMap. + +**To create a cluster role and cluster role binding:** + +Deploy the manifest file: Use the [manifest](../scripts/manifest.yaml) file. +```console +kubectl apply -f manifest.yaml +``` + +**Verify the creation of cluster role and cluster role binding objects:** +```console +kubectl describe clusterrole.rbac.authorization.k8s.io/eks-console-dashboard-full-access-clusterrole +kubectl describe clusterrolebinding.rbac.authorization.k8s.io/eks-console-dashboard-full-access-binding +``` + +To edit aws-auth ConfigMap in a text editor, the cluster creator or admin must run the following command: + +```console +kubectl edit configmap aws-auth -n kube-system +``` + +**To add an IAM user or IAM role, complete either of the following steps.** + +**Important:** mapUsers[].groups.`eks-console-dashboard-full-access-group`. Group name must be the same as the group name created in the [manifest](../scripts/manifest.yaml) file. + +```console +Add the IAM user to mapUsers. For example: + +mapUsers: | + - userarn: arn:aws:iam::XXXXXXXXXXXX:user/ + username: + groups: + - system:bootstrappers + - system:nodes + - eks-console-dashboard-full-access-group +``` + +-or- + +```console +Add the IAM role to mapRoles. 
For example: + +mapRoles: | + - rolearn: arn:aws:iam::XXXXXXXXXXXX:role/ + username: + groups: + - system:bootstrappers + - system:nodes + - eks-console-dashboard-full-access-group +``` + +### Verify access to your Amazon EKS cluster +login to the AWS management console, Select your cluster on the navigation pane and Check the Overview and Workloads tabs for errors. You shouldn't see any errors. + +## Debugging: +Helpful links to resolve issues on the EKS cluster access: +https://aws.amazon.com/premiumsupport/knowledge-center/eks-kubernetes-object-access-error/ diff --git a/aws/modules/eks/variables.tf b/aws/modules/eks/variables.tf index 83cb62c..e8b0c9f 100644 --- a/aws/modules/eks/variables.tf +++ b/aws/modules/eks/variables.tf @@ -63,7 +63,7 @@ variable "instance_profile_name" { variable "cluster_version" { description = "Version of the EKS cluster." type = string - default = "1.19" + default = "1.20" } variable "instance_type" { diff --git a/aws/modules/vpc/main.tf b/aws/modules/vpc/main.tf index 387ca67..2b865bf 100644 --- a/aws/modules/vpc/main.tf +++ b/aws/modules/vpc/main.tf @@ -39,7 +39,9 @@ resource "aws_subnet" "public_subnet" { tags = merge(var.tags, { "Name" = format("%s-public-subnet-%s", var.name, local.pub_avilability_zones[count.index]) - format("kubernetes.io/cluster/%s-eks-cluster", var.name) = "owned" + format("kubernetes.io/cluster/%s-eks-cluster", var.name) = "shared" + + "kubernetes.io/role/elb" = 1 } ) } @@ -62,7 +64,8 @@ resource "aws_subnet" "private_subnet" { tags = merge(var.tags, { "Name" = format("%s-private-subnet-%s", var.name, local.pri_avilability_zones[count.index]) - format("kubernetes.io/cluster/%s-eks-cluster", var.name) = "owned" + format("kubernetes.io/cluster/%s-eks-cluster", var.name) = "shared" + "kubernetes.io/role/internal-elb" = 1 } ) } @@ -221,7 +224,7 @@ resource "aws_security_group" "worker_security_group" { tags = merge(var.tags, { "Name" = format("%s-worker-security-group", var.name) - format("kubernetes.io/cluster/%s-eks-cluster", var.name) = "owned" + format("kubernetes.io/cluster/%s-eks-cluster", var.name) = "shared" } ) } diff --git a/aws/scripts/aws-auth-cm.yaml b/aws/scripts/aws-auth-cm.yaml new file mode 100644 index 0000000..d3b9d30 --- /dev/null +++ b/aws/scripts/aws-auth-cm.yaml @@ -0,0 +1,32 @@ +# Copyright 2021 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
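+#
+# Sample aws-auth ConfigMap for this module. Replace the XXXXXXXXXXXX account IDs
+# and role names below with your own values: the first mapping lets worker nodes
+# join the cluster, and the second grants an additional IAM role access to the
+# cluster and its console dashboard.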
+ +# sample config map +apiVersion: v1 +kind: ConfigMap +metadata: + name: aws-auth + namespace: kube-system +data: + mapRoles: | + - rolearn: arn:aws:iam::XXXXXXXXXXXX:role/dev1-k8ssandra-worker-role + username: system:node:{{EC2PrivateDNSName}} + groups: + - system:bootstrappers + - system:nodes + - rolearn: arn:aws:iam::XXXXXXXXXXXX:role/role-name + username: poweruser-eks + groups: + - name:eks-console-dashboard-full-access-group + - system:master diff --git a/aws/scripts/iam_policy.json b/aws/scripts/iam_policy.json new file mode 100644 index 0000000..c11ff94 --- /dev/null +++ b/aws/scripts/iam_policy.json @@ -0,0 +1,207 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "iam:CreateServiceLinkedRole", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeInstances", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeTags", + "ec2:GetCoipPoolUsage", + "ec2:DescribeCoipPools", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeListenerCertificates", + "elasticloadbalancing:DescribeSSLPolicies", + "elasticloadbalancing:DescribeRules", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetGroupAttributes", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:DescribeTags" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "cognito-idp:DescribeUserPoolClient", + "acm:ListCertificates", + "acm:DescribeCertificate", + "iam:ListServerCertificates", + "iam:GetServerCertificate", + "waf-regional:GetWebACL", + "waf-regional:GetWebACLForResource", + "waf-regional:AssociateWebACL", + "waf-regional:DisassociateWebACL", + "wafv2:GetWebACL", + "wafv2:GetWebACLForResource", + "wafv2:AssociateWebACL", + "wafv2:DisassociateWebACL", + "shield:GetSubscriptionState", + "shield:DescribeProtection", + "shield:CreateProtection", + "shield:DeleteProtection" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:RevokeSecurityGroupIngress" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:CreateSecurityGroup" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:CreateTags" + ], + "Resource": "arn:aws:ec2:*:*:security-group/*", + "Condition": { + "StringEquals": { + "ec2:CreateAction": "CreateSecurityGroup" + }, + "Null": { + "aws:RequestTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Resource": "arn:aws:ec2:*:*:security-group/*", + "Condition": { + "Null": { + "aws:RequestTag/elbv2.k8s.aws/cluster": "true", + "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:RevokeSecurityGroupIngress", + "ec2:DeleteSecurityGroup" + ], + "Resource": "*", + "Condition": { + "Null": { + "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup" + ], + "Resource": "*", + "Condition": { + "Null": { + "aws:RequestTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + 
"elasticloadbalancing:CreateListener", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:CreateRule", + "elasticloadbalancing:DeleteRule" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:RemoveTags" + ], + "Resource": [ + "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*", + "arn:aws:elasticloadbalancing:*:*:loadbalancer/net/*/*", + "arn:aws:elasticloadbalancing:*:*:loadbalancer/app/*/*" + ], + "Condition": { + "Null": { + "aws:RequestTag/elbv2.k8s.aws/cluster": "true", + "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:RemoveTags" + ], + "Resource": [ + "arn:aws:elasticloadbalancing:*:*:listener/net/*/*/*", + "arn:aws:elasticloadbalancing:*:*:listener/app/*/*/*", + "arn:aws:elasticloadbalancing:*:*:listener-rule/net/*/*/*", + "arn:aws:elasticloadbalancing:*:*:listener-rule/app/*/*/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:SetIpAddressType", + "elasticloadbalancing:SetSecurityGroups", + "elasticloadbalancing:SetSubnets", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:ModifyTargetGroupAttributes", + "elasticloadbalancing:DeleteTargetGroup" + ], + "Resource": "*", + "Condition": { + "Null": { + "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:DeregisterTargets" + ], + "Resource": "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*" + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:SetWebAcl", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:AddListenerCertificates", + "elasticloadbalancing:RemoveListenerCertificates", + "elasticloadbalancing:ModifyRule" + ], + "Resource": "*" + } + ] +} diff --git a/aws/scripts/manifest.yaml b/aws/scripts/manifest.yaml new file mode 100644 index 0000000..bec8006 --- /dev/null +++ b/aws/scripts/manifest.yaml @@ -0,0 +1,44 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: eks-console-dashboard-full-access-clusterrole +rules: +- apiGroups: + - "" + resources: + - nodes + - namespaces + - pods + verbs: + - get + - list +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - statefulsets + - replicasets + verbs: + - get + - list +- apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: eks-console-dashboard-full-access-binding +subjects: +- kind: Group + name: eks-console-dashboard-full-access-group + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: eks-console-dashboard-full-access-clusterrole + apiGroup: rbac.authorization.k8s.io diff --git a/aws/scripts/oicd_policy.json b/aws/scripts/oicd_policy.json new file mode 100644 index 0000000..4824076 --- /dev/null +++ b/aws/scripts/oicd_policy.json @@ -0,0 +1,54 @@ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "iam:ListOpenIDConnectProviderTags", + "iam:UpdateOpenIDConnectProviderThumbprint", + "iam:UntagOpenIDConnectProvider", + "iam:AddClientIDToOpenIDConnectProvider", + "iam:GetOpenIDConnectProvider", + "iam:TagOpenIDConnectProvider", + "iam:CreateOpenIDConnectProvider" + ], + 
"Resource": "arn:aws:iam::337811753388:oidc-provider/*" + }, + { + "Sid": "VisualEditor1", + "Effect": "Allow", + "Action": "iam:ListOpenIDConnectProviders", + "Resource": "*" + }, + { + "Sid": "VisualEditor2", + "Effect": "Allow", + "Action": [ + "cloudformation:CreateUploadBucket", + "cloudformation:RegisterType", + "cloudformation:DescribeStackDriftDetectionStatus", + "cloudformation:ListExports", + "cloudformation:ListStacks", + "cloudformation:SetTypeDefaultVersion", + "cloudformation:DescribeType", + "cloudformation:ListImports", + "cloudformation:ListTypes", + "cloudformation:DescribeTypeRegistration", + "cloudformation:DeregisterType", + "cloudformation:ListTypeRegistrations", + "cloudformation:EstimateTemplateCost", + "cloudformation:DescribeAccountLimits", + "cloudformation:CreateStackSet", + "cloudformation:ValidateTemplate", + "cloudformation:ListTypeVersions" + ], + "Resource": "*" + }, + { + "Sid": "VisualEditor3", + "Effect": "Allow", + "Action": "cloudformation:*", + "Resource": "arn:aws:cloudformation:*:337811753388:stack/*/*" + } + ] +} diff --git a/aws/scripts/policy_document.json b/aws/scripts/policy_document.json index 6a6164f..3255df2 100644 --- a/aws/scripts/policy_document.json +++ b/aws/scripts/policy_document.json @@ -187,4 +187,4 @@ Policy for Auto Scaling Group. "Resource": "*" } ] -} \ No newline at end of file +} diff --git a/gcp/modules/iam/outputs.tf b/gcp/modules/iam/outputs.tf index 176cc3f..9f90e14 100644 --- a/gcp/modules/iam/outputs.tf +++ b/gcp/modules/iam/outputs.tf @@ -22,5 +22,4 @@ output "service_account" { output "service_account_key" { description = "The service Account Key to configure Medusa backups to use GCS bucket" value = base64decode(google_service_account_key.service_account_key.private_key) - sensitive = true } diff --git a/test/grafana-ingress.yml b/test/grafana-ingress.yml new file mode 100644 index 0000000..6cea58b --- /dev/null +++ b/test/grafana-ingress.yml @@ -0,0 +1,18 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: reaper-ingress + annotations: + kubernetes.io/ingress.class: alb + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/target-type: instance +spec: + rules: + - http: + paths: + - path: /webui/* + backend: + service: + name: reaper-nodeport + port: + number: 8080 diff --git a/test/ingress.yml b/test/ingress.yml new file mode 100644 index 0000000..659280e --- /dev/null +++ b/test/ingress.yml @@ -0,0 +1,38 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: ingress + annotations: + kubernetes.io/ingress.class: alb + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/target-type: instance +spec: + rules: + - http: + paths: + - path: /* + service: + name: grafana-nodeport + port: + number: 80 + pathType: ImplementationSpecific +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: reaper-ingress + annotations: + kubernetes.io/ingress.class: alb + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/target-type: instance +spec: + rules: + - http: + paths: + - path: /webui/* + backend: + service: + name: reaper-nodeport + port: + number: 8080 + pathType: ImplementationSpecific diff --git a/test/nodeport.yaml b/test/nodeport.yaml new file mode 100644 index 0000000..4f7a444 --- /dev/null +++ b/test/nodeport.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: grafana-nodeport +spec: + type: NodePort + selector: + app.kubernetes.io/instance: test + app.kubernetes.io/name: grafana 
+ ports: + - name: app-grafana + port: 80 + protocol: TCP + targetPort: 3000 +--- +apiVersion: v1 +kind: Service +metadata: + name: reaper-nodeport +spec: + type: NodePort + selector: + app.kubernetes.io/managed-by: reaper-operator + reaper.cassandra-reaper.io/reaper: test-reaper + ports: + - name: app + port: 8080 + protocol: TCP + targetPort: 8080 diff --git a/test/reaper-ingress.yaml b/test/reaper-ingress.yaml index 511c182..ad64247 100644 --- a/test/reaper-ingress.yaml +++ b/test/reaper-ingress.yaml @@ -1,14 +1,16 @@ apiVersion: extensions/v1beta1 kind: Ingress metadata: - name: reaper - namespace: default + name: reaper-ingress + annotations: + kubernetes.io/ingress.class: alb + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/target-type: instance spec: rules: - http: paths: - - backend: + - path: /webui/* + backend: serviceName: reaper-nodeport servicePort: 8080 - path: /webui/* - pathType: ImplementationSpecific diff --git a/test/reaper-nodeport.yaml b/test/reaper-nodeport.yaml deleted file mode 100644 index 0b61650..0000000 --- a/test/reaper-nodeport.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: reaper-nodeport -spec: - ports: - - name: app - port: 8080 - protocol: TCP - targetPort: 8080 - selector: - app.kubernetes.io/managed-by: reaper-operator - reaper.cassandra-reaper.io/reaper: test-reaper - type: NodePort From 593be31d11369479eb6c91bc68ed8989b3f24cee Mon Sep 17 00:00:00 2001 From: chaitu6022 Date: Mon, 28 Jun 2021 08:47:24 -0700 Subject: [PATCH 2/3] Update outputs.tf --- gcp/modules/iam/outputs.tf | 1 + 1 file changed, 1 insertion(+) diff --git a/gcp/modules/iam/outputs.tf b/gcp/modules/iam/outputs.tf index 9f90e14..df7933e 100644 --- a/gcp/modules/iam/outputs.tf +++ b/gcp/modules/iam/outputs.tf @@ -22,4 +22,5 @@ output "service_account" { output "service_account_key" { description = "The service Account Key to configure Medusa backups to use GCS bucket" value = base64decode(google_service_account_key.service_account_key.private_key) + sensitive = true } From 21a7f9c27da026f4c9d90fc105858ec12f84be1e Mon Sep 17 00:00:00 2001 From: chaitu6022 Date: Wed, 30 Jun 2021 09:27:33 -0700 Subject: [PATCH 3/3] Update main.tf --- aws/modules/vpc/main.tf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aws/modules/vpc/main.tf b/aws/modules/vpc/main.tf index 2b865bf..081cc72 100644 --- a/aws/modules/vpc/main.tf +++ b/aws/modules/vpc/main.tf @@ -41,7 +41,7 @@ resource "aws_subnet" "public_subnet" { format("kubernetes.io/cluster/%s-eks-cluster", var.name) = "shared" - "kubernetes.io/role/elb" = 1 + "kubernetes.io/role/elb" = 1 } ) } @@ -65,6 +65,7 @@ resource "aws_subnet" "private_subnet" { "Name" = format("%s-private-subnet-%s", var.name, local.pri_avilability_zones[count.index]) format("kubernetes.io/cluster/%s-eks-cluster", var.name) = "shared" + "kubernetes.io/role/internal-elb" = 1 } )