From c1fef14791ebb351e412dbf650aadfb72b6b26f3 Mon Sep 17 00:00:00 2001
From: Mikkel Oscar Lyderik Larsen
Date: Mon, 2 Dec 2024 21:35:42 +0100
Subject: [PATCH 01/14] Drop kube-node-ready

Signed-off-by: Mikkel Oscar Lyderik Larsen
---
 cluster/config-defaults.yaml                  |  3 -
 cluster/manifests/deletions.yaml              | 11 ---
 .../manifests/kube-node-ready/01-rbac.yaml    |  9 ---
 .../manifests/kube-node-ready/daemonset.yaml  | 69 -------------------
 .../manifests/kube-node-ready/service.yaml    | 20 ------
 cluster/node-pools/worker-combined/stack.yaml | 10 ---
 cluster/node-pools/worker-splitaz/stack.yaml  | 10 ---
 7 files changed, 132 deletions(-)
 delete mode 100644 cluster/manifests/kube-node-ready/01-rbac.yaml
 delete mode 100644 cluster/manifests/kube-node-ready/daemonset.yaml
 delete mode 100644 cluster/manifests/kube-node-ready/service.yaml

diff --git a/cluster/config-defaults.yaml b/cluster/config-defaults.yaml
index bda219be8e..ed7b8a0345 100644
--- a/cluster/config-defaults.yaml
+++ b/cluster/config-defaults.yaml
@@ -484,9 +484,6 @@ kubernetes_lifecycle_metrics_mem_min: "120Mi"
 kube_node_ready_controller_cpu: "50m"
 kube_node_ready_controller_memory: "200Mi"
 
-# Enable kube-node-ready ASG lifecycle hook feature.
-kube_node_ready_enabled: "true"
-
 # Enable deployment of aws-cloud-controller-manager
 aws_cloud_controller_manager_enabled: "true"
 aws_cloud_controller_manager_cpu: "125m"
diff --git a/cluster/manifests/deletions.yaml b/cluster/manifests/deletions.yaml
index 31a5b09023..fac8fd1964 100644
--- a/cluster/manifests/deletions.yaml
+++ b/cluster/manifests/deletions.yaml
@@ -309,17 +309,6 @@ post_apply:
   kind: DaemonSet
   namespace: kube-system
 {{- end }}
-{{- if ne .Cluster.ConfigItems.kube_node_ready_enabled "true" }}
-- name: kube-node-ready
-  kind: DaemonSet
-  namespace: kube-system
-- name: kube-node-ready
-  kind: ServiceAccount
-  namespace: kube-system
-- name: kube-node-ready
-  kind: Service
-  namespace: kube-system
-{{- end }}
 {{- if ne .Cluster.ConfigItems.role_sync_controller_enabled "true" }}
 - name: role-sync-controller
   kind: CronJob
diff --git a/cluster/manifests/kube-node-ready/01-rbac.yaml b/cluster/manifests/kube-node-ready/01-rbac.yaml
deleted file mode 100644
index e6d2d09cab..0000000000
--- a/cluster/manifests/kube-node-ready/01-rbac.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-# {{ if eq .Cluster.ConfigItems.kube_node_ready_enabled "true" }}
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: kube-node-ready
-  namespace: kube-system
-  annotations:
-    iam.amazonaws.com/role: "{{ .Cluster.LocalID }}-kube-node-ready"
-# {{ end }}
diff --git a/cluster/manifests/kube-node-ready/daemonset.yaml b/cluster/manifests/kube-node-ready/daemonset.yaml
deleted file mode 100644
index 65f2e4d12b..0000000000
--- a/cluster/manifests/kube-node-ready/daemonset.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-# {{ if eq .Cluster.ConfigItems.kube_node_ready_enabled "true" }}
-# {{ $image := "container-registry.zalando.net/teapot/kube-node-ready:master-34" }}
-# {{ $version := index (split $image ":") 1 }}
-
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  name: kube-node-ready
-  namespace: kube-system
-  labels:
-    application: kubernetes
-    component: kube-node-ready
-    version: {{$version}}
-spec:
-  updateStrategy:
-    type: RollingUpdate
-  selector:
-    matchLabels:
-      daemonset: kube-node-ready
-  template:
-    metadata:
-      labels:
-        daemonset: kube-node-ready
-        application: kubernetes
-        component: kube-node-ready
-        version: {{$version}}
-      annotations:
-        logging/destination: "{{.Cluster.ConfigItems.log_destination_infra}}"
-    spec:
-      # only schedule on nodes which require ASG lifecycle hook to trigger
-      nodeSelector:
-        asg-lifecycle-hook: "true"
-      serviceAccountName: kube-node-ready
-      dnsConfig:
-        options:
-          - name: ndots
-            value: "1"
-      priorityClassName: system-node-critical
-      tolerations:
-      - operator: Exists
-        effect: NoSchedule
-      - operator: Exists
-        effect: NoExecute
-      containers:
-      - name: kube-node-ready
-        image: "{{ $image }}"
-        args:
-        - --lifecycle-hook=kube-node-ready-lifecycle-hook
-        resources:
-          requests:
-            cpu: 1m
-            memory: 50Mi
-            ephemeral-storage: 256Mi
-          limits:
-            cpu: 1m
-            memory: 50Mi
-        readinessProbe:
-          httpGet:
-            path: /healthz
-            port: 8080
-          initialDelaySeconds: 10
-          timeoutSeconds: 300
-        securityContext:
-          readOnlyRootFilesystem: true
-          runAsNonRoot: true
-          runAsUser: 1000
-      securityContext:
-        fsGroup: 65534
-# {{ end }}
diff --git a/cluster/manifests/kube-node-ready/service.yaml b/cluster/manifests/kube-node-ready/service.yaml
deleted file mode 100644
index 5a9fb3a838..0000000000
--- a/cluster/manifests/kube-node-ready/service.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-# {{ if eq .Cluster.ConfigItems.kube_node_ready_enabled "true" }}
-kind: Service
-apiVersion: v1
-metadata:
-  name: kube-node-ready
-  namespace: kube-system
-  labels:
-    application: kubernetes
-    component: kube-node-ready
-spec:
-  type: NodePort
-  externalTrafficPolicy: Local
-  ports:
-  - port: 80
-    nodePort: 30080
-    targetPort: 8080
-    protocol: TCP
-  selector:
-    component: kube-node-ready
-# {{ end }}
diff --git a/cluster/node-pools/worker-combined/stack.yaml b/cluster/node-pools/worker-combined/stack.yaml
index 38a0d1e285..9f48f1a68b 100644
--- a/cluster/node-pools/worker-combined/stack.yaml
+++ b/cluster/node-pools/worker-combined/stack.yaml
@@ -174,13 +174,3 @@ Resources:
       Roles:
       - !ImportValue '{{ .Cluster.ID }}:worker-iam-role'
     Type: 'AWS::IAM::InstanceProfile'
-# {{ if eq .Cluster.ConfigItems.kube_node_ready_enabled "true" }}
-  AutoscalingLifecycleHook:
-    Properties:
-      AutoScalingGroupName: !Ref AutoScalingGroup
-      LifecycleHookName: "kube-node-ready-lifecycle-hook"
-      DefaultResult: CONTINUE
-      HeartbeatTimeout: '600'
-      LifecycleTransition: 'autoscaling:EC2_INSTANCE_LAUNCHING'
-    Type: 'AWS::AutoScaling::LifecycleHook'
-# {{ end }}
diff --git a/cluster/node-pools/worker-splitaz/stack.yaml b/cluster/node-pools/worker-splitaz/stack.yaml
index 10dafb3811..95b0bbd0a3 100644
--- a/cluster/node-pools/worker-splitaz/stack.yaml
+++ b/cluster/node-pools/worker-splitaz/stack.yaml
@@ -125,16 +125,6 @@ Resources:
         VPCZoneIdentifier:
         - "{{ index $data.Values.subnets $az }}"
       Type: 'AWS::AutoScaling::AutoScalingGroup'
-# {{ if eq $data.Cluster.ConfigItems.kube_node_ready_enabled "true" }}
-  AutoscalingLifecycleHook{{$azID}}:
-    Properties:
-      AutoScalingGroupName: !Ref AutoScalingGroup{{$azID}}
-      LifecycleHookName: "kube-node-ready-lifecycle-hook"
-      DefaultResult: CONTINUE
-      HeartbeatTimeout: '600'
-      LifecycleTransition: 'autoscaling:EC2_INSTANCE_LAUNCHING'
-    Type: 'AWS::AutoScaling::LifecycleHook'
-# {{ end }}
 {{ end }}
 {{ end }}
 {{ end }}
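[Series note] The lifecycle hooks removed in this patch paired with the kube-node-ready daemon: the hook held a freshly launched instance in Pending:Wait until the daemon reported the node ready. For context, completing such a hook is one AWS API call per instance; the sketch below uses the standard AWS CLI with the hook name from the deleted templates, while the ASG name and instance id are placeholders (the daemon's exact call lives in its own source, not shown here):

    # signal the ASG that the node is ready, letting the instance proceed
    # from Pending:Wait to InService (placeholder ASG name and instance id)
    aws autoscaling complete-lifecycle-action \
      --auto-scaling-group-name "<node-pool-asg>" \
      --lifecycle-hook-name "kube-node-ready-lifecycle-hook" \
      --lifecycle-action-result CONTINUE \
      --instance-id "i-0123456789abcdef0"

Because the deleted hooks declared DefaultResult: CONTINUE with HeartbeatTimeout: '600', an instance whose hook was never completed proceeded anyway after ten minutes, so dropping the component degrades gracefully instead of blocking scale-ups.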
From 09e43a8bffe1300fe2ab28c23d3f174667b3da85 Mon Sep 17 00:00:00 2001
From: Mikkel Oscar Lyderik Larsen
Date: Mon, 2 Dec 2024 21:40:54 +0100
Subject: [PATCH 02/14] Drop control_plane_asg_lifecycle_hook

Signed-off-by: Mikkel Oscar Lyderik Larsen
---
 cluster/config-defaults.yaml                    |  5 -----
 cluster/manifests/kube2iam/daemonset.yaml       |  2 +-
 cluster/node-pools/master-default/stack.yaml    | 10 ----------
 cluster/node-pools/master-default/userdata.yaml |  2 +-
 4 files changed, 2 insertions(+), 17 deletions(-)

diff --git a/cluster/config-defaults.yaml b/cluster/config-defaults.yaml
index ed7b8a0345..600f26cadf 100644
--- a/cluster/config-defaults.yaml
+++ b/cluster/config-defaults.yaml
@@ -775,8 +775,6 @@ kube2iam_cpu: "25m"
 kube2iam_memory: "100Mi"
 
 # configure whether kube2iam should only run on worker nodes.
-# This depends on control_plane_asg_lifecycle_hook=false as kube-node-ready
-# doesn't work without kube2iam.
 kube2iam_worker_only: "true"
 
 # CIDR configuration for nodes and pods
@@ -1132,9 +1130,6 @@ apiserver_memory_limit_percent: "80"
 
 apiserver_max_requests_inflight: "400"
 
-# specify if control plane nodes should rely on ASG Lifecycle Hook or not
-control_plane_asg_lifecycle_hook: "false"
-
 # enable graceful shutdown on the control_plane nodes
 control_plane_graceful_shutdown: "true"
 
diff --git a/cluster/manifests/kube2iam/daemonset.yaml b/cluster/manifests/kube2iam/daemonset.yaml
index 9ac5efd6ea..c8ead191cd 100644
--- a/cluster/manifests/kube2iam/daemonset.yaml
+++ b/cluster/manifests/kube2iam/daemonset.yaml
@@ -21,7 +21,7 @@ spec:
       annotations:
         logging/destination: "{{.Cluster.ConfigItems.log_destination_infra}}"
     spec:
-{{- if and (eq .Cluster.ConfigItems.kube2iam_worker_only "true") (eq .Cluster.ConfigItems.control_plane_asg_lifecycle_hook "false") }}
+{{- if eq .Cluster.ConfigItems.kube2iam_worker_only "true" }}
       nodeSelector:
         node.kubernetes.io/role: worker
 {{- end }}
diff --git a/cluster/node-pools/master-default/stack.yaml b/cluster/node-pools/master-default/stack.yaml
index 5e44ea9666..8fa31347e6 100644
--- a/cluster/node-pools/master-default/stack.yaml
+++ b/cluster/node-pools/master-default/stack.yaml
@@ -101,13 +101,3 @@ Resources:
       Roles:
       - !ImportValue '{{ .Cluster.ID }}:master-iam-role'
     Type: 'AWS::IAM::InstanceProfile'
-{{- if eq .Cluster.ConfigItems.control_plane_asg_lifecycle_hook "true" }}
-  AutoscalingLifecycleHook:
-    Properties:
-      AutoScalingGroupName: !Ref AutoScalingGroup
-      LifecycleHookName: "kube-node-ready-lifecycle-hook"
-      DefaultResult: CONTINUE
-      HeartbeatTimeout: '600'
-      LifecycleTransition: 'autoscaling:EC2_INSTANCE_LAUNCHING'
-    Type: 'AWS::AutoScaling::LifecycleHook'
-{{- end }}
diff --git a/cluster/node-pools/master-default/userdata.yaml b/cluster/node-pools/master-default/userdata.yaml
index 23eef646f4..aace81aeef 100644
--- a/cluster/node-pools/master-default/userdata.yaml
+++ b/cluster/node-pools/master-default/userdata.yaml
@@ -4,7 +4,7 @@ write_files:
   path: /etc/kubernetes/secrets.env
   content: |
     NODEPOOL_TAINTS=node.kubernetes.io/role=master:NoSchedule{{if index .NodePool.ConfigItems "taints"}},{{.NodePool.ConfigItems.taints}}{{end}}
-    NODE_LABELS=master=true,node.kubernetes.io/exclude-from-external-load-balancers,node.kubernetes.io/distro=ubuntu,cluster-lifecycle-controller.zalan.do/decommission-priority=999,lifecycle-status=ready{{if index .NodePool.ConfigItems "labels"}},{{.NodePool.ConfigItems.labels}}{{end}}{{if eq .Cluster.ConfigItems.control_plane_asg_lifecycle_hook "true" }},asg-lifecycle-hook=true{{end}}
+    NODE_LABELS=master=true,node.kubernetes.io/exclude-from-external-load-balancers,node.kubernetes.io/distro=ubuntu,cluster-lifecycle-controller.zalan.do/decommission-priority=999,lifecycle-status=ready{{if index .NodePool.ConfigItems "labels"}},{{.NodePool.ConfigItems.labels}}{{end}}
     NODEPOOL_NAME={{ .NodePool.Name }}
     KUBELET_ROLE=master
 
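[Series note] With control_plane_asg_lifecycle_hook gone, the kube2iam template condition collapses to the single kube2iam_worker_only check. For clusters keeping the default kube2iam_worker_only: "true", the rendered DaemonSet pod spec simply pins kube2iam to workers:

    spec:
      nodeSelector:
        node.kubernetes.io/role: worker

Masters no longer need kube2iam here because, per the removed comment, kube-node-ready was the only control-plane consumer that depended on it.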
From 7aa271d4415ba92ed891d5acb7f712ddd5cbc75e Mon Sep 17 00:00:00 2001
From: Mahmoud Gaballah
Date: Mon, 2 Dec 2024 19:50:48 +0100
Subject: [PATCH 03/14] Add a special instance-types string 'not-specified'

Signed-off-by: Mahmoud Gaballah
---
 cluster/node-pools/worker-karpenter/provisioners.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/cluster/node-pools/worker-karpenter/provisioners.yaml b/cluster/node-pools/worker-karpenter/provisioners.yaml
index 9fe8f0513a..f5c9ad9831 100644
--- a/cluster/node-pools/worker-karpenter/provisioners.yaml
+++ b/cluster/node-pools/worker-karpenter/provisioners.yaml
@@ -124,7 +124,7 @@ spec:
 # {{ end}}
 #{{ end}}
 
-#{{ if and (eq (len .NodePool.InstanceTypes) 1) (eq (index .NodePool.InstanceTypes 0) "default-for-karpenter") }}
+#{{ if (eq .NodePool.KarpenterInstanceTypeStrategy "default-for-karpenter" ) }}
      - key: "karpenter.k8s.aws/instance-family"
        operator: In
        values:
@@ -155,7 +155,7 @@ spec:
        - "c7in"
        - "m7in"
        - "r7in"
-#{{ else if (gt (len .NodePool.InstanceTypes) 0) }}
+#{{ else if (eq .NodePool.KarpenterInstanceTypeStrategy "custom" ) }}
      - key: "node.kubernetes.io/instance-type"
        operator: In
        values:
@@ -165,7 +165,7 @@ spec:
 #{{ end }}
 
 # safety guards to prevent the use of unwanted instance types in case the user has not specified any specific instance types
-#{{ if or (eq .NodePool.KarpenterInstanceTypeStrategy "default-for-karpenter") (eq .NodePool.KarpenterInstanceTypeStrategy "not-specified") }}
+#{{ if ne .NodePool.KarpenterInstanceTypeStrategy "custom" }}
 # exclude unwanted sizes
      - key: "karpenter.k8s.aws/instance-size"
        operator: "NotIn"
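[Series note] The template now keys off .NodePool.KarpenterInstanceTypeStrategy instead of inspecting .NodePool.InstanceTypes directly, with 'not-specified' covering pools that list no instance types at all; every strategy other than "custom" now gets the size/family safety guards. A hypothetical node-pool sketch of the three cases (field names are illustrative, not the exact cluster-registry schema):

    node_pools:
    - name: default-worker
      instance_types: ["default-for-karpenter"]     # -> curated instance-family allowlist + guards
    - name: pinned-worker
      instance_types: ["m6i.2xlarge", "r6i.4xlarge"] # -> strategy "custom": exactly these types, no guards
    - name: open-worker                              # no instance_types given
                                                     # -> strategy "not-specified": guards only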
From 99438af846906fce0196fc2d5cdeefaa2dd74086 Mon Sep 17 00:00:00 2001
From: Martin Linkhorst
Date: Tue, 19 Nov 2024 17:06:13 +0100
Subject: [PATCH 04/14] Switch to generic deny-all admitter, match conditions
 and fail policy

---
 cluster/config-defaults.yaml                |  6 ++---
 .../01-admission-control/teapot.yaml        | 24 +++++++++++++++----
 .../node-pools/master-default/userdata.yaml |  2 +-
 3 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/cluster/config-defaults.yaml b/cluster/config-defaults.yaml
index bda219be8e..539b5d2d68 100644
--- a/cluster/config-defaults.yaml
+++ b/cluster/config-defaults.yaml
@@ -677,9 +677,9 @@ teapot_admission_controller_configmap_deletion_protection_factories_enabled: "true"
 
 # enable the rolebinding admission-controller webhook which validates rolebindings and clusterrolebindings
 teapot_admission_controller_enable_rolebinding_webhook: "true"
-# enable the generic admission-controller webhook which catches all resources
-teapot_admission_controller_enable_generic_webhook: "false"
-# prevent write operations for non-admin users in protected namespaces
+# enable the generic deny-all admission webhook which rejects all requests it receives
+teapot_admission_controller_enable_write_protection_webhook: "false"
+# configure the behaviour of the deny-all admission webhook, `true` blocks everything, `false` allows everything
 teapot_admission_controller_prevent_write_operations: "false"
 
 # Enable and configure Pod Security Policy rules implemented in admission-controller.
diff --git a/cluster/manifests/01-admission-control/teapot.yaml b/cluster/manifests/01-admission-control/teapot.yaml
index f30663d2e7..f224423731 100644
--- a/cluster/manifests/01-admission-control/teapot.yaml
+++ b/cluster/manifests/01-admission-control/teapot.yaml
@@ -267,10 +267,10 @@ webhooks:
         apiVersions: ["v1"]
         resources: ["rolebindings", "clusterrolebindings"]
 {{- end }}
-{{- if eq .Cluster.ConfigItems.teapot_admission_controller_enable_generic_webhook "true" }}
-  - name: generic-namespaced-admitter.teapot.zalan.do
+{{- if eq .Cluster.ConfigItems.teapot_admission_controller_enable_write_protection_webhook "true" }}
+  - name: namespaced-deny-admitter.teapot.zalan.do
     clientConfig:
-      url: "https://localhost:8085/generic"
+      url: "https://localhost:8085/deny"
       caBundle: "{{ .Cluster.ConfigItems.ca_cert_decompressed }}"
       admissionReviewVersions: ["v1beta1"]
       failurePolicy: Fail
@@ -287,9 +287,16 @@ webhooks:
         apiVersions: ["*"]
         resources: ["*/*"]
         scope: "Namespaced"
-  - name: generic-cluster-admitter.teapot.zalan.do
+    matchConditions:
+    - name: 'exclude-privileged-groups'
+      expression: 'request.userInfo.groups.all(g, !(g in ["system:masters", "system:nodes", "system:serviceaccounts:kube-system", "okta:common/administrator", "zalando:administrator"]))'
+    - name: 'exclude-privileged-usernames'
+      expression: '!(request.userInfo.username in ["system:kube-controller-manager", "system:kube-scheduler", "zalando-iam:zalando:service:k8sapi_credentials-provider"])'
+    - name: 'exclude-eks-components'
+      expression: '!request.userInfo.username.startsWith("eks:")'
+  - name: global-deny-admitter.teapot.zalan.do
     clientConfig:
-      url: "https://localhost:8085/generic"
+      url: "https://localhost:8085/deny"
       caBundle: "{{ .Cluster.ConfigItems.ca_cert_decompressed }}"
       admissionReviewVersions: ["v1beta1"]
       failurePolicy: Fail
@@ -304,4 +311,11 @@ webhooks:
         apiVersions: ["*"]
         resources: ["*/*"]
         scope: "Cluster"
+    matchConditions:
+    - name: 'exclude-privileged-groups'
+      expression: 'request.userInfo.groups.all(g, !(g in ["system:masters", "system:nodes", "system:serviceaccounts:kube-system", "okta:common/administrator", "zalando:administrator"]))'
+    - name: 'exclude-privileged-usernames'
+      expression: '!(request.userInfo.username in ["system:kube-controller-manager", "system:kube-scheduler", "zalando-iam:zalando:service:k8sapi_credentials-provider"])'
+    - name: 'exclude-eks-components'
+      expression: '!request.userInfo.username.startsWith("eks:")'
 {{- end }}
diff --git a/cluster/node-pools/master-default/userdata.yaml b/cluster/node-pools/master-default/userdata.yaml
index 23eef646f4..445ccc6fd2 100644
--- a/cluster/node-pools/master-default/userdata.yaml
+++ b/cluster/node-pools/master-default/userdata.yaml
@@ -206,7 +206,7 @@ write_files:
       limits:
         memory: {{ .Values.InstanceInfo.MemoryFraction (parseInt64 .Cluster.ConfigItems.apiserver_memory_limit_percent)}}
 {{- end }}
-  - image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/teapot/admission-controller:master-224
+  - image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/teapot/admission-controller:master-226
     name: admission-controller
     lifecycle:
       preStop:
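[Series note] The matchConditions above are CEL expressions the API server evaluates before calling the webhook: every condition must evaluate to true for the request to be forwarded, so any 'exclude-*' entry that fails simply bypasses the deny endpoint. A worked evaluation under two assumed requests (illustrative only, not part of any manifest):

    # request A: userInfo.groups = ["okta:common/administrator", "system:authenticated"]
    #   exclude-privileged-groups: groups.all(g, !(g in [...])) -> false
    #   -> one condition is false -> webhook skipped -> request admitted
    # request B: userInfo.groups = ["okta:some-team/engineer", "system:authenticated"],
    #            userInfo.username = "jdoe"
    #   all three conditions -> true -> request sent to https://localhost:8085/deny -> rejected

Because failurePolicy is Fail, an unreachable admission-controller also blocks these writes, which matches the fail-closed intent of a deny-all webhook.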
From 2bb083b8b8cab841a97510673f80c2738b5a5c7c Mon Sep 17 00:00:00 2001
From: Martin Linkhorst
Date: Tue, 3 Dec 2024 11:44:27 +0100
Subject: [PATCH 05/14] Add an admitter that validates that unprivileged users
 cannot exec into postgres pods

---
 .../01-admission-control/teapot.yaml          | 24 +++++++++++++++++++
 .../node-pools/master-default/userdata.yaml   |  2 +-
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/cluster/manifests/01-admission-control/teapot.yaml b/cluster/manifests/01-admission-control/teapot.yaml
index f224423731..386cb2bb93 100644
--- a/cluster/manifests/01-admission-control/teapot.yaml
+++ b/cluster/manifests/01-admission-control/teapot.yaml
@@ -268,6 +268,30 @@ webhooks:
         resources: ["rolebindings", "clusterrolebindings"]
 {{- end }}
 {{- if eq .Cluster.ConfigItems.teapot_admission_controller_enable_write_protection_webhook "true" }}
+  - name: pod-exec-admitter.teapot.zalan.do
+    clientConfig:
+      url: "https://localhost:8085/pod/exec"
+      caBundle: "{{ .Cluster.ConfigItems.ca_cert_decompressed }}"
+    admissionReviewVersions: ["v1beta1"]
+    failurePolicy: Fail
+    sideEffects: "NoneOnDryRun"
+    matchPolicy: Equivalent
+    namespaceSelector:
+      matchExpressions:
+      - key: kubernetes.io/metadata.name
+        operator: NotIn
+        values: [ "kube-system", "visibility", "kubenurse" ]
+    rules:
+    - operations: [ "CONNECT" ]
+      apiGroups: [""]
+      apiVersions: ["v1"]
+      resources: ["pods/exec"]
+      scope: "Namespaced"
+    matchConditions:
+    - name: 'exclude-privileged-groups'
+      expression: 'request.userInfo.groups.all(g, !(g in ["okta:common/administrator", "zalando:administrator"]))'
+    - name: 'exclude-postgres-admins'
+      expression: 'request.userInfo.groups.all(g, !(g in ["okta:common/postgres-admin"]))'
   - name: namespaced-deny-admitter.teapot.zalan.do
     clientConfig:
       url: "https://localhost:8085/deny"
diff --git a/cluster/node-pools/master-default/userdata.yaml b/cluster/node-pools/master-default/userdata.yaml
index 445ccc6fd2..38928763f5 100644
--- a/cluster/node-pools/master-default/userdata.yaml
+++ b/cluster/node-pools/master-default/userdata.yaml
@@ -206,7 +206,7 @@ write_files:
       limits:
         memory: {{ .Values.InstanceInfo.MemoryFraction (parseInt64 .Cluster.ConfigItems.apiserver_memory_limit_percent)}}
 {{- end }}
-  - image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/teapot/admission-controller:master-226
+  - image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/teapot/admission-controller:master-227
     name: admission-controller
     lifecycle:
      preStop:
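[Series note] The pod-exec admitter intercepts CONNECT requests on pods/exec outside kube-system, visibility, and kubenurse; per the commit message, the handler behind /pod/exec then checks whether the target is a postgres pod. Roughly how this plays out for kubectl users, with assumed group memberships and a placeholder pod name:

    # member of okta:common/postgres-admin: the exclude-postgres-admins
    # condition evaluates to false, the webhook is skipped, exec proceeds
    kubectl exec -it pilot-db-0 -- psql

    # regular user: all matchConditions hold, the CONNECT request is sent
    # to the admitter, which can reject exec into postgres pods
    kubectl exec -it pilot-db-0 -- bash

The actual allow/deny logic lives in the admission-controller image (bumped to master-227 here), not in the webhook configuration itself.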
From 8153ab45aa8111001e98201c42951430bb94b447 Mon Sep 17 00:00:00 2001
From: "k8s-on-aws-manager-app[bot]" <181735053+k8s-on-aws-manager-app[bot]@users.noreply.github.com>
Date: Tue, 3 Dec 2024 14:03:36 +0000
Subject: [PATCH 06/14] platform-iam-tokeninfo: Update to version master-131

Update 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/foundation/platform-iam-tokeninfo to version master-131
---
 cluster/node-pools/master-default/userdata.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cluster/node-pools/master-default/userdata.yaml b/cluster/node-pools/master-default/userdata.yaml
index 23eef646f4..e93ba5db15 100644
--- a/cluster/node-pools/master-default/userdata.yaml
+++ b/cluster/node-pools/master-default/userdata.yaml
@@ -357,7 +357,7 @@ write_files:
       - mountPath: /etc/kubernetes/k8s-authnz-webhook-kubeconfig
         name: k8s-authnz-webhook-kubeconfig
         readOnly: true
-  - image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/foundation/platform-iam-tokeninfo:master-130
+  - image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/foundation/platform-iam-tokeninfo:master-131
    name: tokeninfo
    ports:
    - containerPort: 9021
@@ -388,7 +388,7 @@ write_files:
          value: {{ .Cluster.ConfigItems.apiserver_business_partner_ids }}
 {{ if ne .Cluster.Environment "production" }}
       - name: tokeninfo-sandbox
-        image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/foundation/platform-iam-tokeninfo:master-130
+        image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/foundation/platform-iam-tokeninfo:master-131
        ports:
        - containerPort: 9022
        lifecycle:

From 1c4a2f7a3e8787583d108fcfb2ac5d58063e3de0 Mon Sep 17 00:00:00 2001
From: "k8s-on-aws-manager-app[bot]" <181735053+k8s-on-aws-manager-app[bot]@users.noreply.github.com>
Date: Tue, 3 Dec 2024 14:52:29 +0000
Subject: [PATCH 07/14] kube-metrics-adapter: Update to version
 kube-metrics-adapter-0.2.3-33-g802bf08

Update container-registry.zalando.net/teapot/kube-metrics-adapter to version kube-metrics-adapter-0.2.3-33-g802bf08
---
 cluster/manifests/kube-metrics-adapter/deployment.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cluster/manifests/kube-metrics-adapter/deployment.yaml b/cluster/manifests/kube-metrics-adapter/deployment.yaml
index 193d0c0b0f..11b6a2450f 100644
--- a/cluster/manifests/kube-metrics-adapter/deployment.yaml
+++ b/cluster/manifests/kube-metrics-adapter/deployment.yaml
@@ -27,7 +27,7 @@ spec:
       serviceAccountName: custom-metrics-apiserver
       containers:
       - name: kube-metrics-adapter
-        image: container-registry.zalando.net/teapot/kube-metrics-adapter:kube-metrics-adapter-0.2.3-31-g9ff49a9
+        image: container-registry.zalando.net/teapot/kube-metrics-adapter:kube-metrics-adapter-0.2.3-33-g802bf08
         env:
         - name: AWS_REGION
           value: {{ .Cluster.Region }}

From 76cbea5408ca430e9e49d22cde9fff11cf3da325 Mon Sep 17 00:00:00 2001
From: "k8s-on-aws-manager-app[bot]" <181735053+k8s-on-aws-manager-app[bot]@users.noreply.github.com>
Date: Tue, 3 Dec 2024 15:05:36 +0000
Subject: [PATCH 08/14] deployment-controller: Update to version master-233

Update container-registry.zalando.net/teapot/deployment-controller to version master-233
---
 .../manifests/deployment-service/controller-statefulset.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cluster/manifests/deployment-service/controller-statefulset.yaml b/cluster/manifests/deployment-service/controller-statefulset.yaml
index dffb941438..5028686e16 100644
--- a/cluster/manifests/deployment-service/controller-statefulset.yaml
+++ b/cluster/manifests/deployment-service/controller-statefulset.yaml
@@ -29,7 +29,7 @@ spec:
       terminationGracePeriodSeconds: 300
       containers:
       - name: "deployment-service-controller"
-        image: "container-registry.zalando.net/teapot/deployment-controller:master-232"
+        image: "container-registry.zalando.net/teapot/deployment-controller:master-233"
         args:
         - "--config-namespace=kube-system"
        - "--decrypt-kms-alias-arn=arn:aws:kms:{{ .Cluster.Region }}:{{ .Cluster.InfrastructureAccount | getAWSAccountID }}:alias/deployment-secret"
From 4a14e95ace0aef9be04e3bc2a864d883c188db65 Mon Sep 17 00:00:00 2001
From: "k8s-on-aws-manager-app[bot]" <181735053+k8s-on-aws-manager-app[bot]@users.noreply.github.com>
Date: Tue, 3 Dec 2024 15:05:43 +0000
Subject: [PATCH 09/14] deployment-status-service: Update to version master-233

Update container-registry.zalando.net/teapot/deployment-status-service to version master-233
---
 .../manifests/deployment-service/status-service-deployment.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cluster/manifests/deployment-service/status-service-deployment.yaml b/cluster/manifests/deployment-service/status-service-deployment.yaml
index d56ba2efaf..6379442fc0 100644
--- a/cluster/manifests/deployment-service/status-service-deployment.yaml
+++ b/cluster/manifests/deployment-service/status-service-deployment.yaml
@@ -1,4 +1,4 @@
-# {{ $image := "container-registry.zalando.net/teapot/deployment-status-service:master-232" }}
+# {{ $image := "container-registry.zalando.net/teapot/deployment-status-service:master-233" }}
 # {{ $version := index (split $image ":") 1 }}
 
 apiVersion: apps/v1

From d286c4d3117c4df61b64e8b72f655e896a2ebb2a Mon Sep 17 00:00:00 2001
From: "k8s-on-aws-manager-app[bot]" <181735053+k8s-on-aws-manager-app[bot]@users.noreply.github.com>
Date: Wed, 4 Dec 2024 08:25:35 +0000
Subject: [PATCH 10/14] unbound: Update to version 1.22.0-master-10

Update container-registry.zalando.net/teapot/unbound to version 1.22.0-master-10
---
 cluster/manifests/coredns-local/daemonset-coredns.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cluster/manifests/coredns-local/daemonset-coredns.yaml b/cluster/manifests/coredns-local/daemonset-coredns.yaml
index f49241c065..c522960194 100644
--- a/cluster/manifests/coredns-local/daemonset-coredns.yaml
+++ b/cluster/manifests/coredns-local/daemonset-coredns.yaml
@@ -39,7 +39,7 @@ spec:
       containers:
 {{ if eq .Cluster.ConfigItems.dns_cache "unbound" }}
       - name: unbound
-        image: container-registry.zalando.net/teapot/unbound:1.19.2-master-9
+        image: container-registry.zalando.net/teapot/unbound:1.22.0-master-10
         args:
         - -d
         - -c

From a02bc2c226000e5ba144a20b475658330aed50fd Mon Sep 17 00:00:00 2001
From: "k8s-on-aws-manager-app[bot]" <181735053+k8s-on-aws-manager-app[bot]@users.noreply.github.com>
Date: Wed, 4 Dec 2024 08:32:23 +0000
Subject: [PATCH 11/14] kube-metrics-adapter: Update to version
 kube-metrics-adapter-0.2.3-35-g498f85e

Update container-registry.zalando.net/teapot/kube-metrics-adapter to version kube-metrics-adapter-0.2.3-35-g498f85e
---
 cluster/manifests/kube-metrics-adapter/deployment.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cluster/manifests/kube-metrics-adapter/deployment.yaml b/cluster/manifests/kube-metrics-adapter/deployment.yaml
index 11b6a2450f..96642a6e28 100644
--- a/cluster/manifests/kube-metrics-adapter/deployment.yaml
+++ b/cluster/manifests/kube-metrics-adapter/deployment.yaml
@@ -27,7 +27,7 @@ spec:
       serviceAccountName: custom-metrics-apiserver
       containers:
       - name: kube-metrics-adapter
-        image: container-registry.zalando.net/teapot/kube-metrics-adapter:kube-metrics-adapter-0.2.3-33-g802bf08
+        image: container-registry.zalando.net/teapot/kube-metrics-adapter:kube-metrics-adapter-0.2.3-35-g498f85e
         env:
         - name: AWS_REGION
          value: {{ .Cluster.Region }}
From ef1899b2ac68e395ff8f2de32e50bb5b43c25052 Mon Sep 17 00:00:00 2001
From: "k8s-on-aws-manager-app[bot]" <181735053+k8s-on-aws-manager-app[bot]@users.noreply.github.com>
Date: Wed, 4 Dec 2024 09:07:15 +0000
Subject: [PATCH 12/14] admission-controller: Update to version master-228

Update 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/teapot/admission-controller to version master-228
---
 cluster/node-pools/master-default/userdata.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cluster/node-pools/master-default/userdata.yaml b/cluster/node-pools/master-default/userdata.yaml
index 38928763f5..209a080986 100644
--- a/cluster/node-pools/master-default/userdata.yaml
+++ b/cluster/node-pools/master-default/userdata.yaml
@@ -206,7 +206,7 @@ write_files:
       limits:
         memory: {{ .Values.InstanceInfo.MemoryFraction (parseInt64 .Cluster.ConfigItems.apiserver_memory_limit_percent)}}
 {{- end }}
-  - image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/teapot/admission-controller:master-227
+  - image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/teapot/admission-controller:master-228
     name: admission-controller
     lifecycle:
       preStop:

From 061715e9e81cb32ecf2d487e6a6068e5eaca7e1b Mon Sep 17 00:00:00 2001
From: "k8s-on-aws-manager-app[bot]" <181735053+k8s-on-aws-manager-app[bot]@users.noreply.github.com>
Date: Wed, 4 Dec 2024 09:51:33 +0000
Subject: [PATCH 13/14] aws-cloud-controller-manager-internal: Update to
 version v1.31.1-master-133

Update container-registry.zalando.net/teapot/aws-cloud-controller-manager-internal to version v1.31.1-master-133
---
 cluster/manifests/aws-cloud-controller-manager/daemonset.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cluster/manifests/aws-cloud-controller-manager/daemonset.yaml b/cluster/manifests/aws-cloud-controller-manager/daemonset.yaml
index 04f3390f99..abee6bdf81 100644
--- a/cluster/manifests/aws-cloud-controller-manager/daemonset.yaml
+++ b/cluster/manifests/aws-cloud-controller-manager/daemonset.yaml
@@ -27,7 +27,7 @@ spec:
         - --cloud-provider=aws
        - --use-service-account-credentials=true
        - --configure-cloud-routes=false
-        image: container-registry.zalando.net/teapot/aws-cloud-controller-manager-internal:v1.31.1-master-132
+        image: container-registry.zalando.net/teapot/aws-cloud-controller-manager-internal:v1.31.1-master-133
         name: aws-cloud-controller-manager
         resources:
          requests:

From 3af3b8b7a467594c8cb3a06760a3cebdd82a8a6d Mon Sep 17 00:00:00 2001
From: "k8s-on-aws-manager-app[bot]" <181735053+k8s-on-aws-manager-app[bot]@users.noreply.github.com>
Date: Wed, 4 Dec 2024 10:07:42 +0000
Subject: [PATCH 14/14] kube-controller-manager-internal: Update to version
 v1.31.3-master-133

Update 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/teapot/kube-controller-manager-internal to version v1.31.3-master-133
---
 cluster/node-pools/master-default/userdata.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cluster/node-pools/master-default/userdata.yaml b/cluster/node-pools/master-default/userdata.yaml
index 0a1781a90e..0a7a4b7e55 100644
--- a/cluster/node-pools/master-default/userdata.yaml
+++ b/cluster/node-pools/master-default/userdata.yaml
@@ -600,7 +600,7 @@ write_files:
       containers:
       - name: kube-controller-manager
 {{- if eq .Cluster.ConfigItems.kubernetes_controller_manager_image "zalando" }}
-        image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/teapot/kube-controller-manager-internal:v1.31.2-master-132
+        image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/teapot/kube-controller-manager-internal:v1.31.3-master-133
 {{- else }}
         image: nonexistent.zalan.do/teapot/kube-controller-manager:fixed
 {{- end }}