# eks-worker-node-upgrade-v2.tf.txt
# forked from miqdigital/terraform-aws-eks-cluster
# EKS currently documents this required userdata for EKS worker nodes to
# properly configure Kubernetes applications on the EC2 instance.
# We utilize a Terraform local here to simplify Base64 encoding this
# information into the AutoScaling Launch Configuration.
# More information: https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml
## Updated AMIs support /etc/eks/bootstrap.sh
#### User data for worker launch
locals {
  eks-node-private-userdata-v2 = <<USERDATA
#!/bin/bash -xe
sudo /etc/eks/bootstrap.sh --apiserver-endpoint '${aws_eks_cluster.eks-cluster.endpoint}' --b64-cluster-ca '${aws_eks_cluster.eks-cluster.certificate_authority.0.data}' '${var.cluster-name}' \
  --kubelet-extra-args "--node-labels=app=name --register-with-taints=app=name:NoExecute --kube-reserved cpu=500m,memory=1Gi,ephemeral-storage=1Gi --system-reserved cpu=500m,memory=1Gi,ephemeral-storage=1Gi --eviction-hard memory.available<500Mi,nodefs.available<10%"
USERDATA
}
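
# The userdata above references variables and resources that are declared
# elsewhere in this module (aws_eks_cluster.eks-cluster plus var.cluster-name,
# var.eks-worker-ami, var.worker-node-instance_type, var.ssh_key_pair and
# var.volume_size). A minimal sketch of the assumed variable declarations --
# types and descriptions are illustrative, not taken from the original module:
#
# variable "cluster-name" {
#   description = "Name of the EKS cluster"
#   type        = string
# }
#
# variable "eks-worker-ami" {
#   description = "EKS-optimized worker AMI ID"
#   type        = string
# }
#
# variable "worker-node-instance_type" {
#   description = "EC2 instance type for the worker nodes"
#   type        = string
# }
#
# variable "ssh_key_pair" {
#   description = "EC2 key pair name for SSH access to the worker nodes"
#   type        = string
# }
#
# variable "volume_size" {
#   description = "Root volume size in GiB for the worker nodes"
#   type        = number
# }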
resource "aws_launch_configuration" "eks-private-lc-v2" {
iam_instance_profile = "${aws_iam_instance_profile.eks-node.name}"
image_id = "${var.eks-worker-ami}" ## update to th bew version of ami --visit https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html
instance_type = "${var.worker-node-instance_type}" # use instance variable
key_name = "${var.ssh_key_pair}"
name_prefix = "eks-private"
security_groups = ["${aws_security_group.eks-node.id}"]
user_data_base64 = "${base64encode(local.eks-node-private-userdata-v2)}"
root_block_device {
delete_on_termination = true
volume_size = "${var.volume_size}"
volume_type = "gp2"
}
lifecycle {
create_before_destroy = true
}
}
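
# The launch configuration above references aws_iam_instance_profile.eks-node
# and aws_security_group.eks-node, both created elsewhere in this module. A
# minimal sketch of the instance profile wiring, assuming the worker-node IAM
# role is also named eks-node (an assumption for illustration only):
#
# resource "aws_iam_instance_profile" "eks-node" {
#   name = "eks-node"
#   role = aws_iam_role.eks-node.name   # assumed worker-node role
# }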
resource "aws_autoscaling_group" "eks-private-asg-v2" {
desired_capacity = 1
launch_configuration = "${aws_launch_configuration.eks-private-lc-v2.id}"
max_size = 2
min_size = 1
name = "eks-private"
vpc_zone_identifier = data.aws_subnets.subs_priv.ids
tag {
key = "Name"
value = "eks-worker-private-node-v2"
propagate_at_launch = true
}
tag {
key = "kubernetes.io/cluster/${var.cluster-name}"
value = "owned"
propagate_at_launch = true
}
  ## Enable these tags when you run the cluster autoscaler inside the cluster.
  ## https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md
  # tag {
  #   key                 = "k8s.io/cluster-autoscaler/enabled"
  #   value               = ""
  #   propagate_at_launch = true
  # }
  #
  # tag {
  #   key                 = "k8s.io/cluster-autoscaler/${var.cluster-name}"
  #   value               = ""
  #   propagate_at_launch = true
  # }
}
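
# The ASG above pulls its subnets from data.aws_subnets.subs_priv, which is
# defined elsewhere in this module. A minimal sketch of that lookup, assuming
# the private subnets are filtered by a var.vpc_id variable and a subnet tag
# (both assumptions for illustration only), could look like:
#
# data "aws_subnets" "subs_priv" {
#   filter {
#     name   = "vpc-id"
#     values = [var.vpc_id]   # assumed variable
#   }
#
#   tags = {
#     "kubernetes.io/role/internal-elb" = "1"   # assumed tag on the private subnets
#   }
# }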
# Adding an EKS worker scaling policy for scale up/down
# and creating CloudWatch alarms for both scale up and scale down.
## If required, you can use a scale up/down policy; otherwise the cluster autoscaler will take care of scaling the nodes.
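#
# A minimal sketch of the scale-up half of such a policy, using simple scaling
# on CPU utilization (resource names, threshold, period and cooldown below are
# illustrative assumptions, not part of the original module):
#
# resource "aws_autoscaling_policy" "eks-private-scale-up" {
#   name                   = "eks-private-scale-up"
#   autoscaling_group_name = aws_autoscaling_group.eks-private-asg-v2.name
#   adjustment_type        = "ChangeInCapacity"
#   scaling_adjustment     = 1
#   cooldown               = 300
# }
#
# resource "aws_cloudwatch_metric_alarm" "eks-private-cpu-high" {
#   alarm_name          = "eks-private-cpu-high"
#   comparison_operator = "GreaterThanOrEqualToThreshold"
#   evaluation_periods  = 2
#   metric_name         = "CPUUtilization"
#   namespace           = "AWS/EC2"
#   period              = 300
#   statistic           = "Average"
#   threshold           = 80
#
#   dimensions = {
#     AutoScalingGroupName = aws_autoscaling_group.eks-private-asg-v2.name
#   }
#
#   alarm_actions = [aws_autoscaling_policy.eks-private-scale-up.arn]
# }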