From 9fe126f42986032e5c307d71a5a868f19f8e64be Mon Sep 17 00:00:00 2001 From: fabiankramm Date: Thu, 25 Nov 2021 14:12:22 +0100 Subject: [PATCH 1/3] refactor: make separate helm chart for k0s --- .github/workflows/release.yaml | 3 +- Dockerfile | 2 +- chart/templates/secret.yaml | 19 - {chart => charts/k0s}/.helmignore | 0 charts/k0s/Chart.yaml | 15 + {chart => charts/k0s}/templates/NOTES.txt | 0 {chart => charts/k0s}/templates/_helpers.tpl | 0 {chart => charts/k0s}/templates/ingress.yaml | 0 .../k0s}/templates/rbac/clusterrole.yaml | 0 .../templates/rbac/clusterrolebinding.yaml | 0 .../k0s}/templates/rbac/role.yaml | 0 .../k0s}/templates/rbac/rolebinding.yaml | 0 charts/k0s/templates/secret.yaml | 34 + {chart => charts/k0s}/templates/service.yaml | 0 .../k0s}/templates/serviceaccount.yaml | 0 .../k0s}/templates/statefulset-service.yaml | 0 .../k0s}/templates/statefulset.yaml | 3 + charts/k0s/values.yaml | 153 ++++ charts/k3s/.helmignore | 21 + {chart => charts/k3s}/Chart.yaml | 0 charts/k3s/templates/NOTES.txt | 8 + charts/k3s/templates/_helpers.tpl | 59 ++ charts/k3s/templates/ingress.yaml | 26 + charts/k3s/templates/rbac/clusterrole.yaml | 27 + .../templates/rbac/clusterrolebinding.yaml | 23 + charts/k3s/templates/rbac/role.yaml | 30 + charts/k3s/templates/rbac/rolebinding.yaml | 24 + charts/k3s/templates/service.yaml | 20 + charts/k3s/templates/serviceaccount.yaml | 16 + charts/k3s/templates/statefulset-service.yaml | 20 + charts/k3s/templates/statefulset.yaml | 156 +++++ {chart => charts/k3s}/values.yaml | 13 +- cmd/vcluster/cmd/root.go | 25 + cmd/vcluster/cmd/start.go | 528 ++++++++++++++ cmd/vcluster/main.go | 532 +------------- cmd/vclusterctl/cmd/app/create/types.go | 23 + .../cmd/app/create/values/default.go | 41 ++ cmd/vclusterctl/cmd/app/create/values/k0s.go | 45 ++ cmd/vclusterctl/cmd/app/create/values/k3s.go | 162 +++++ cmd/vclusterctl/cmd/create.go | 223 +----- devspace.yaml | 5 +- docs/pages/operator/other-distributions.mdx | 63 +- 
pkg/certs/cert_list.go | 419 +++++++++++ pkg/certs/certs.go | 245 +++++++ pkg/certs/constants.go | 103 +++ pkg/certs/init.go | 126 ++++ pkg/certs/kubeconfig.go | 293 ++++++++ pkg/certs/kubeconfig_util.go | 72 ++ pkg/certs/types.go | 174 +++++ pkg/certs/util.go | 656 ++++++++++++++++++ 50 files changed, 3603 insertions(+), 804 deletions(-) delete mode 100644 chart/templates/secret.yaml rename {chart => charts/k0s}/.helmignore (100%) create mode 100644 charts/k0s/Chart.yaml rename {chart => charts/k0s}/templates/NOTES.txt (100%) rename {chart => charts/k0s}/templates/_helpers.tpl (100%) rename {chart => charts/k0s}/templates/ingress.yaml (100%) rename {chart => charts/k0s}/templates/rbac/clusterrole.yaml (100%) rename {chart => charts/k0s}/templates/rbac/clusterrolebinding.yaml (100%) rename {chart => charts/k0s}/templates/rbac/role.yaml (100%) rename {chart => charts/k0s}/templates/rbac/rolebinding.yaml (100%) create mode 100644 charts/k0s/templates/secret.yaml rename {chart => charts/k0s}/templates/service.yaml (100%) rename {chart => charts/k0s}/templates/serviceaccount.yaml (100%) rename {chart => charts/k0s}/templates/statefulset-service.yaml (100%) rename {chart => charts/k0s}/templates/statefulset.yaml (98%) create mode 100644 charts/k0s/values.yaml create mode 100644 charts/k3s/.helmignore rename {chart => charts/k3s}/Chart.yaml (100%) create mode 100644 charts/k3s/templates/NOTES.txt create mode 100644 charts/k3s/templates/_helpers.tpl create mode 100644 charts/k3s/templates/ingress.yaml create mode 100644 charts/k3s/templates/rbac/clusterrole.yaml create mode 100644 charts/k3s/templates/rbac/clusterrolebinding.yaml create mode 100644 charts/k3s/templates/rbac/role.yaml create mode 100644 charts/k3s/templates/rbac/rolebinding.yaml create mode 100644 charts/k3s/templates/service.yaml create mode 100644 charts/k3s/templates/serviceaccount.yaml create mode 100644 charts/k3s/templates/statefulset-service.yaml create mode 100644 
charts/k3s/templates/statefulset.yaml rename {chart => charts/k3s}/values.yaml (88%) create mode 100644 cmd/vcluster/cmd/root.go create mode 100644 cmd/vcluster/cmd/start.go create mode 100644 cmd/vclusterctl/cmd/app/create/types.go create mode 100644 cmd/vclusterctl/cmd/app/create/values/default.go create mode 100644 cmd/vclusterctl/cmd/app/create/values/k0s.go create mode 100644 cmd/vclusterctl/cmd/app/create/values/k3s.go create mode 100644 pkg/certs/cert_list.go create mode 100644 pkg/certs/certs.go create mode 100644 pkg/certs/constants.go create mode 100644 pkg/certs/init.go create mode 100644 pkg/certs/kubeconfig.go create mode 100644 pkg/certs/kubeconfig_util.go create mode 100644 pkg/certs/types.go create mode 100644 pkg/certs/util.go diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 28e1c3d84..0e6d6a588 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -87,7 +87,8 @@ jobs: RELEASE_VERSION=$(echo $GITHUB_REF | sed -nE 's!refs/tags/v!!p') helm plugin install https://github.com/chartmuseum/helm-push.git helm repo add chartmuseum $CHART_MUSEUM_URL --username $CHART_MUSEUM_USER --password $CHART_MUSEUM_PASSWORD - helm cm-push --force --version="$RELEASE_VERSION" chart/ chartmuseum + helm cm-push --force --version="$RELEASE_VERSION" charts/k3s/ chartmuseum + helm cm-push --force --version="$RELEASE_VERSION" charts/k0s/ chartmuseum env: CHART_MUSEUM_URL: "https://charts.loft.sh/" CHART_MUSEUM_USER: ${{ secrets.CHART_MUSEUM_USER }} diff --git a/Dockerfile b/Dockerfile index 5bd0b957d..73aeeae8e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -53,4 +53,4 @@ WORKDIR / COPY --from=builder /vcluster/vcluster . 
COPY manifests/ /manifests/ -ENTRYPOINT ["/vcluster"] +ENTRYPOINT ["/vcluster", "start"] diff --git a/chart/templates/secret.yaml b/chart/templates/secret.yaml deleted file mode 100644 index 292b84166..000000000 --- a/chart/templates/secret.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if .Values.secret }} -{{- if .Values.secret.name }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ .Values.secret.name }} - namespace: {{ .Release.Namespace }} - labels: - app: vcluster - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -type: Opaque -stringData: - {{- range $key, $value := .Values.secret.data }} - {{ $key }}: {{ toJson $value }} - {{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/chart/.helmignore b/charts/k0s/.helmignore similarity index 100% rename from chart/.helmignore rename to charts/k0s/.helmignore diff --git a/charts/k0s/Chart.yaml b/charts/k0s/Chart.yaml new file mode 100644 index 000000000..721e717bb --- /dev/null +++ b/charts/k0s/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v2 +name: vcluster-k0s +description: vcluster - Virtual Kubernetes Clusters (k0s) + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. 
+type: application + +version: 0.0.1 # version is auto-generated by release pipeline diff --git a/chart/templates/NOTES.txt b/charts/k0s/templates/NOTES.txt similarity index 100% rename from chart/templates/NOTES.txt rename to charts/k0s/templates/NOTES.txt diff --git a/chart/templates/_helpers.tpl b/charts/k0s/templates/_helpers.tpl similarity index 100% rename from chart/templates/_helpers.tpl rename to charts/k0s/templates/_helpers.tpl diff --git a/chart/templates/ingress.yaml b/charts/k0s/templates/ingress.yaml similarity index 100% rename from chart/templates/ingress.yaml rename to charts/k0s/templates/ingress.yaml diff --git a/chart/templates/rbac/clusterrole.yaml b/charts/k0s/templates/rbac/clusterrole.yaml similarity index 100% rename from chart/templates/rbac/clusterrole.yaml rename to charts/k0s/templates/rbac/clusterrole.yaml diff --git a/chart/templates/rbac/clusterrolebinding.yaml b/charts/k0s/templates/rbac/clusterrolebinding.yaml similarity index 100% rename from chart/templates/rbac/clusterrolebinding.yaml rename to charts/k0s/templates/rbac/clusterrolebinding.yaml diff --git a/chart/templates/rbac/role.yaml b/charts/k0s/templates/rbac/role.yaml similarity index 100% rename from chart/templates/rbac/role.yaml rename to charts/k0s/templates/rbac/role.yaml diff --git a/chart/templates/rbac/rolebinding.yaml b/charts/k0s/templates/rbac/rolebinding.yaml similarity index 100% rename from chart/templates/rbac/rolebinding.yaml rename to charts/k0s/templates/rbac/rolebinding.yaml diff --git a/charts/k0s/templates/secret.yaml b/charts/k0s/templates/secret.yaml new file mode 100644 index 000000000..9c850e4e6 --- /dev/null +++ b/charts/k0s/templates/secret.yaml @@ -0,0 +1,34 @@ +apiVersion: v1 +kind: Secret +metadata: + name: vc-{{ .Release.Name }}-config + namespace: {{ .Release.Namespace }} + labels: + app: vcluster + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque 
+stringData: + {{- if .Values.config }} + config.yaml: {{ toJson .Values.config }} + {{- else }} + config.yaml: |- + apiVersion: k0s.k0sproject.io/v1beta1 + kind: Cluster + metadata: + name: k0s + spec: + api: + port: 6443 + k0sApiPort: 9443 + extraArgs: + enable-admission-plugins: NodeRestriction + network: + # Will be replaced automatically from the vcluster cli + serviceCIDR: {{ .Values.serviceCIDR }} + provider: custom + controllerManager: + extraArgs: + controllers: '*,-nodeipam,-nodelifecycle,-persistentvolume-binder,-attachdetach,-persistentvolume-expander,-cloud-node-lifecycle' + {{- end }} \ No newline at end of file diff --git a/chart/templates/service.yaml b/charts/k0s/templates/service.yaml similarity index 100% rename from chart/templates/service.yaml rename to charts/k0s/templates/service.yaml diff --git a/chart/templates/serviceaccount.yaml b/charts/k0s/templates/serviceaccount.yaml similarity index 100% rename from chart/templates/serviceaccount.yaml rename to charts/k0s/templates/serviceaccount.yaml diff --git a/chart/templates/statefulset-service.yaml b/charts/k0s/templates/statefulset-service.yaml similarity index 100% rename from chart/templates/statefulset-service.yaml rename to charts/k0s/templates/statefulset-service.yaml diff --git a/chart/templates/statefulset.yaml b/charts/k0s/templates/statefulset.yaml similarity index 98% rename from chart/templates/statefulset.yaml rename to charts/k0s/templates/statefulset.yaml index 82f65b5ad..fee57a981 100644 --- a/chart/templates/statefulset.yaml +++ b/charts/k0s/templates/statefulset.yaml @@ -57,6 +57,9 @@ spec: serviceAccountName: vc-{{ .Release.Name }} {{- end }} volumes: + - name: k0s-config + secret: + secretName: vc-{{ .Release.Name }}-config {{- if .Values.volumes }} {{ toYaml .Values.volumes | indent 8 }} {{- end }} diff --git a/charts/k0s/values.yaml b/charts/k0s/values.yaml new file mode 100644 index 000000000..208280ceb --- /dev/null +++ b/charts/k0s/values.yaml @@ -0,0 +1,153 @@ +# Make 
sure the service-cidr is the exact service cidr of the host cluster. +# If this does not match, you won't be able to create services within the vcluster. You can find out +# the service cidr of the host cluster by creating a service with a not allowed ClusterIP in the host cluster. +# This will yield an error message in the form of: +# The Service "faulty-service" is invalid: spec.clusterIP: Invalid value: "1.1.1.1": provided IP is not in the valid range. The range of valid IPs is 10.96.0.0/12 +serviceCIDR: "10.96.0.0/12" + +# Syncer configuration +syncer: + # Image to use for the syncer + # image: loftsh/vcluster + extraArgs: + - --request-header-ca-cert=/data/k0s/pki/ca.crt + - --client-ca-cert=/data/k0s/pki/ca.crt + - --server-ca-cert=/data/k0s/pki/ca.crt + - --server-ca-key=/data/k0s/pki/ca.key + - --kube-config=/data/k0s/pki/admin.conf + env: [] + livenessProbe: + enabled: true + readinessProbe: + enabled: true + volumeMounts: + - mountPath: /data + name: data + readOnly: true + resources: + limits: + memory: 1Gi + requests: + cpu: 100m + memory: 128Mi + +# Virtual Cluster (k0s) configuration +vcluster: + # Image to use for the virtual cluster + image: k0sproject/k0s:v1.22.4-k0s.0 + command: + - k0s + baseArgs: + - controller + - --config=/etc/k0s/config.yaml + - --data-dir=/data/k0s + - --disable-components=konnectivity-server,kube-scheduler,csr-approver,default-psp,kube-proxy,coredns,network-provider,helm,metrics-server,kubelet-config + # Extra arguments for k0s. 
+ extraArgs: [] + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/k0s + name: k0s-config + env: [] + resources: + limits: + memory: 2Gi + requests: + cpu: 200m + memory: 256Mi + +# Storage settings for the vcluster +storage: + # If this is disabled, vcluster will use an emptyDir instead + # of a PersistentVolumeClaim + persistence: true + # Size of the persistent volume claim + size: 5Gi + # Optional StorageClass used for the pvc + # if empty default StorageClass defined in your host cluster will be used + #className: + +# Extra volumes that should be created for the StatefulSet +volumes: [] + +# Service account that should be used by the vcluster +serviceAccount: + create: true + # Optional name of the service account to use + # name: default + +# Roles & ClusterRoles for the vcluster +rbac: + clusterRole: + # Enable this to let the vcluster sync + # real nodes, storage classes and priority classes + create: false + role: + # This is required for basic functionality of vcluster + create: true + +# The amount of replicas to run the statefulset with +replicas: 1 + +# NodeSelector used to schedule the vcluster +nodeSelector: {} + +# Affinity to apply to the vcluster statefulset +affinity: {} + +# Tolerations to apply to the vcluster statefulset +tolerations: [] + +# Extra Labels for the stateful set +labels: {} + +# Extra Annotations for the stateful set +annotations: {} + +# Service configurations +service: + type: ClusterIP + +# Configure the ingress resource that allows you to access the vcluster +ingress: + # Enable ingress record generation + enabled: false + # Ingress path type + pathType: ImplementationSpecific + apiVersion: networking.k8s.io/v1 + ingressClassName: "" + host: vcluster.local + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "true" + +# Configure SecurityContext of the 
containers in the VCluster pod +securityContext: + allowPrivilegeEscalation: false + # capabilities: + # drop: + # - all + # readOnlyRootFilesystem will be set to true by default at a later release + # currently leaving it undefined for backwards compatibility with older vcluster cli versions + # readOnlyRootFilesystem: true + + # To run vcluster pod as non-root uncomment runAsUser and runAsNonRoot values. + # Update the runAsUser value if your cluster has limitations on user UIDs. + # For installation on OpenShift leave the runAsUser undefined (commented out). + # runAsUser: 12345 + # runAsNonRoot: true + +# Custom k0s to deploy +#config: |- +# apiVersion: k0s.k0sproject.io/v1beta1 +# ... + +# Set "enable" to true when running vcluster in an OpenShift host +# This will add an extra rule to the deployed role binding in order +# to manage service endpoints +openshift: + enable: false + \ No newline at end of file diff --git a/charts/k3s/.helmignore b/charts/k3s/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/k3s/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/chart/Chart.yaml b/charts/k3s/Chart.yaml similarity index 100% rename from chart/Chart.yaml rename to charts/k3s/Chart.yaml diff --git a/charts/k3s/templates/NOTES.txt b/charts/k3s/templates/NOTES.txt new file mode 100644 index 000000000..c32a7790f --- /dev/null +++ b/charts/k3s/templates/NOTES.txt @@ -0,0 +1,8 @@ +Thank you for installing {{ .Chart.Name }}. + +Your release is named {{ .Release.Name }}. 
+ +To learn more about the release, try: + + $ helm status {{ .Release.Name }} + $ helm get all {{ .Release.Name }} \ No newline at end of file diff --git a/charts/k3s/templates/_helpers.tpl b/charts/k3s/templates/_helpers.tpl new file mode 100644 index 000000000..fe19f0d60 --- /dev/null +++ b/charts/k3s/templates/_helpers.tpl @@ -0,0 +1,59 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "vcluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "vcluster.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "vcluster.clusterRoleName" -}} +{{- printf "vc-%s-v-%s" .Release.Name .Release.Namespace | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "vcluster.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "vcluster.labels" -}} +app.kubernetes.io/name: {{ include "vcluster.name" . }} +helm.sh/chart: {{ include "vcluster.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- else }} +app.kubernetes.io/version: {{ .Chart.Version | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Get +*/}} +{{- $}} +{{- define "vcluster.admin.accessKey" -}} +{{- now | unixEpoch | toString | trunc 8 | sha256sum -}} +{{- end -}} \ No newline at end of file diff --git a/charts/k3s/templates/ingress.yaml b/charts/k3s/templates/ingress.yaml new file mode 100644 index 000000000..0705358bb --- /dev/null +++ b/charts/k3s/templates/ingress.yaml @@ -0,0 +1,26 @@ +{{- if .Values.ingress.enabled }} +apiVersion: {{ .Values.ingress.apiVersion }} +kind: Ingress +metadata: + {{- if .Values.ingress.annotations }} + annotations: + {{- toYaml .Values.ingress.annotations | nindent 4 }} + {{- end }} + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} +spec: + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + - host: {{ .Values.ingress.host | quote }} + http: + paths: + - backend: + service: + name: {{ .Release.Name }} + port: + name: https + path: / + pathType: {{ .Values.ingress.pathType }} +{{- end }} diff --git a/charts/k3s/templates/rbac/clusterrole.yaml b/charts/k3s/templates/rbac/clusterrole.yaml new file mode 100644 index 000000000..bfda471c9 --- /dev/null +++ b/charts/k3s/templates/rbac/clusterrole.yaml @@ -0,0 +1,27 @@ +{{- if .Values.rbac.clusterRole.create }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "vcluster.clusterRoleName" . 
}} + labels: + app: vcluster + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +rules: + - apiGroups: [""] + resources: ["nodes", "nodes/status"] + verbs: ["get", "watch", "list", "update", "patch"] + - apiGroups: [""] + resources: ["pods", "nodes/proxy", "nodes/metrics", "nodes/stats"] + verbs: ["get", "watch", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["create", "delete", "patch", "update", "get", "watch", "list"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "watch", "list"] + - apiGroups: ["scheduling.k8s.io"] + resources: ["priorityclasses"] + verbs: ["create", "delete", "patch", "update", "get", "list", "watch"] +{{- end }} \ No newline at end of file diff --git a/charts/k3s/templates/rbac/clusterrolebinding.yaml b/charts/k3s/templates/rbac/clusterrolebinding.yaml new file mode 100644 index 000000000..44606a040 --- /dev/null +++ b/charts/k3s/templates/rbac/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if .Values.rbac.clusterRole.create }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "vcluster.clusterRoleName" . }} + labels: + app: vcluster + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +subjects: + - kind: ServiceAccount + {{- if .Values.serviceAccount.name }} + name: {{ .Values.serviceAccount.name }} + {{- else }} + name: vc-{{ .Release.Name }} + {{- end }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ template "vcluster.clusterRoleName" . 
}} + apiGroup: rbac.authorization.k8s.io +{{- end }} \ No newline at end of file diff --git a/charts/k3s/templates/rbac/role.yaml b/charts/k3s/templates/rbac/role.yaml new file mode 100644 index 000000000..5afa1ed91 --- /dev/null +++ b/charts/k3s/templates/rbac/role.yaml @@ -0,0 +1,30 @@ +{{- if .Values.rbac.role.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + app: vcluster + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +rules: + - apiGroups: [""] + resources: ["configmaps", "secrets", "services", "pods", "pods/attach", "pods/portforward", "pods/exec", "endpoints", "persistentvolumeclaims"] + verbs: ["create", "delete", "patch", "update", "get", "list", "watch"] + - apiGroups: [""] + resources: ["events", "pods/log"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["create", "delete", "patch", "update", "get", "list", "watch"] + - apiGroups: ["apps"] + resources: ["statefulsets", "replicasets", "deployments"] + verbs: ["get", "list", "watch"] +{{- if .Values.openshift.enable }} + - apiGroups: [""] + resources: ["endpoints/restricted"] + verbs: ["create"] +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/k3s/templates/rbac/rolebinding.yaml b/charts/k3s/templates/rbac/rolebinding.yaml new file mode 100644 index 000000000..e751c0e41 --- /dev/null +++ b/charts/k3s/templates/rbac/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- if .Values.rbac.role.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + app: vcluster + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +subjects: + - kind: ServiceAccount + {{- if .Values.serviceAccount.name }} + name: {{ 
.Values.serviceAccount.name }} + {{- else }} + name: vc-{{ .Release.Name }} + {{- end }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: {{ .Release.Name }} + apiGroup: rbac.authorization.k8s.io +{{- end }} \ No newline at end of file diff --git a/charts/k3s/templates/service.yaml b/charts/k3s/templates/service.yaml new file mode 100644 index 000000000..d574bb686 --- /dev/null +++ b/charts/k3s/templates/service.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + app: vcluster + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: {{ .Values.service.type }} + ports: + - name: https + port: 443 + targetPort: 8443 + protocol: TCP + selector: + app: vcluster + release: {{ .Release.Name }} diff --git a/charts/k3s/templates/serviceaccount.yaml b/charts/k3s/templates/serviceaccount.yaml new file mode 100644 index 000000000..14a522afe --- /dev/null +++ b/charts/k3s/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vc-{{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + app: vcluster + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.serviceAccount.imagePullSecrets }} +imagePullSecrets: +{{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/k3s/templates/statefulset-service.yaml b/charts/k3s/templates/statefulset-service.yaml new file mode 100644 index 000000000..ff0fbf6bb --- /dev/null +++ b/charts/k3s/templates/statefulset-service.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }}-headless + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "vcluster.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + ports: + - name: https + port: 443 + targetPort: 8443 + protocol: TCP + clusterIP: None + selector: + app: vcluster + release: "{{ .Release.Name }}" \ No newline at end of file diff --git a/charts/k3s/templates/statefulset.yaml b/charts/k3s/templates/statefulset.yaml new file mode 100644 index 000000000..4ae80ede9 --- /dev/null +++ b/charts/k3s/templates/statefulset.yaml @@ -0,0 +1,156 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + app: vcluster + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.labels }} +{{ toYaml .Values.labels | indent 4 }} +{{- end }} + {{- if .Values.annotations }} + annotations: +{{ toYaml .Values.annotations | indent 4 }} + {{- end }} +spec: + serviceName: {{ .Release.Name }}-headless + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: vcluster + release: {{ .Release.Name }} + {{- if .Values.storage.persistence }} + {{- if not .Values.storage.volumeClaimTemplates }} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.storage.className }} + resources: + requests: + storage: {{ .Values.storage.size }} + {{- else }} + volumeClaimTemplates: +{{ toYaml .Values.volumeClaimTemplates | indent 4 }} + {{- end }} + {{- end }} + template: + metadata: + labels: + app: vcluster + release: {{ .Release.Name }} + spec: + terminationGracePeriodSeconds: 10 + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- if .Values.serviceAccount.name }} + serviceAccountName: {{ .Values.serviceAccount.name }} + {{- else }} + serviceAccountName: vc-{{ 
.Release.Name }} + {{- end }} + volumes: + {{- if .Values.volumes }} +{{ toYaml .Values.volumes | indent 8 }} + {{- end }} + {{- if not .Values.storage.persistence }} + - name: data + emptyDir: {} + {{- end }} + containers: + {{- if not .Values.vcluster.disabled }} + - image: {{ .Values.vcluster.image }} + name: vcluster + command: + {{- range $f := .Values.vcluster.command }} + - {{ $f | quote }} + {{- end }} + args: + {{- range $f := .Values.vcluster.baseArgs }} + - {{ $f | quote }} + {{- end }} + {{- if .Values.serviceCIDR }} + - --service-cidr={{ .Values.serviceCIDR }} + {{- end }} + {{- range $f := .Values.vcluster.extraArgs }} + - {{ $f | quote }} + {{- end }} + env: +{{ toYaml .Values.vcluster.env | indent 10 }} + securityContext: +{{ toYaml .Values.securityContext | indent 10 }} + volumeMounts: +{{ toYaml .Values.vcluster.volumeMounts | indent 10 }} + resources: +{{ toYaml .Values.vcluster.resources | indent 10 }} + {{- end }} + {{- if not .Values.syncer.disabled }} + - name: syncer + {{- if .Values.syncer.image }} + image: "{{ .Values.syncer.image }}" + {{- else }} + image: "loftsh/vcluster:{{ .Chart.Version }}" + {{- end }} + {{- if .Values.syncer.workingDir }} + workingDir: {{ .Values.syncer.workingDir }} + {{- end }} + {{- if .Values.syncer.command }} + command: + {{- range $f := .Values.syncer.command }} + - {{ $f | quote }} + {{- end }} + {{- end }} + {{- if not .Values.syncer.noArgs }} + args: + - --service-name={{ .Release.Name }} + - --suffix={{ .Release.Name }} + - --set-owner + {{- if .Values.ingress.enabled }} + - --tls-san={{ .Values.ingress.host }} + {{- end }} + {{- range $f := .Values.syncer.extraArgs }} + - {{ $f | quote }} + {{- end }} + {{- else }} + args: +{{ toYaml .Values.syncer.extraArgs | indent 10 }} + {{- end }} + {{- if .Values.syncer.livenessProbe }} + {{- if .Values.syncer.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + failureThreshold: 10 + initialDelaySeconds: 60 + 
periodSeconds: 2 + {{- end }} + {{- end }} + {{- if .Values.syncer.readinessProbe }} + {{- if .Values.syncer.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /readyz + port: 8443 + scheme: HTTPS + failureThreshold: 30 + periodSeconds: 2 + {{- end }} + {{- end }} + securityContext: +{{ toYaml .Values.securityContext | indent 10 }} + env: +{{ toYaml .Values.syncer.env | indent 10 }} + volumeMounts: +{{ toYaml .Values.syncer.volumeMounts | indent 10 }} + resources: +{{ toYaml .Values.syncer.resources | indent 10 }} + {{- end }} diff --git a/chart/values.yaml b/charts/k3s/values.yaml similarity index 88% rename from chart/values.yaml rename to charts/k3s/values.yaml index 422e46c11..cf74b70e9 100644 --- a/chart/values.yaml +++ b/charts/k3s/values.yaml @@ -1,3 +1,10 @@ +# Make sure the service-cidr is the exact service cidr of the host cluster. +# If this does not match, you won't be able to create services within the vcluster. You can find out +# the service cidr of the host cluster by creating a service with a not allowed ClusterIP in the host cluster. +# This will yield an error message in the form of: +# The Service "faulty-service" is invalid: spec.clusterIP: Invalid value: "1.1.1.1": provided IP is not in the valid range. The range of valid IPs is 10.96.0.0/12 +#serviceCIDR: "10.96.0.0/12" + # Syncer configuration syncer: # Image to use for the syncer @@ -41,8 +48,7 @@ vcluster: # the service cidr of the host cluster by creating a service with a not allowed ClusterIP in the host cluster. # This will yield an error message in the form of: # The Service "faulty-service" is invalid: spec.clusterIP: Invalid value: "1.1.1.1": provided IP is not in the valid range. 
The range of valid IPs is 10.96.0.0/12 - extraArgs: - - --service-cidr=10.96.0.0/12 + extraArgs: [] volumeMounts: - mountPath: /data name: data @@ -137,9 +143,6 @@ securityContext: # runAsUser: 12345 # runAsNonRoot: true -# Extra secret to deploy, can be useful if other k8s distributions such as k0s are used -secret: {} - # Set "enable" to true when running vcluster in an OpenShift host # This will add an extra rule to the deployed role binding in order # to manage service endpoints diff --git a/cmd/vcluster/cmd/root.go b/cmd/vcluster/cmd/root.go new file mode 100644 index 000000000..57f8db1bf --- /dev/null +++ b/cmd/vcluster/cmd/root.go @@ -0,0 +1,25 @@ +package cmd + +import ( + "github.com/spf13/cobra" +) + +// NewRootCmd returns a new root command +func NewRootCmd() *cobra.Command { + return &cobra.Command{ + Use: "vcluster", + SilenceUsage: true, + SilenceErrors: true, + Short: "Welcome to vcluster!", + Long: `vcluster root command`, + } +} + +// BuildRoot creates a new root command from the +func BuildRoot() *cobra.Command { + rootCmd := NewRootCmd() + + // add top level commands + rootCmd.AddCommand(NewStartCommand()) + return rootCmd +} diff --git a/cmd/vcluster/cmd/start.go b/cmd/vcluster/cmd/start.go new file mode 100644 index 000000000..f3468a02c --- /dev/null +++ b/cmd/vcluster/cmd/start.go @@ -0,0 +1,528 @@ +package cmd + +import ( + "context" + "fmt" + context2 "github.com/loft-sh/vcluster/cmd/vcluster/context" + "github.com/loft-sh/vcluster/pkg/apis" + "github.com/loft-sh/vcluster/pkg/controllers" + "github.com/loft-sh/vcluster/pkg/controllers/resources/endpoints" + "github.com/loft-sh/vcluster/pkg/controllers/resources/nodes" + "github.com/loft-sh/vcluster/pkg/controllers/resources/nodes/nodeservice" + translatepods "github.com/loft-sh/vcluster/pkg/controllers/resources/pods/translate" + "github.com/loft-sh/vcluster/pkg/controllers/resources/services" + "github.com/loft-sh/vcluster/pkg/coredns" + "github.com/loft-sh/vcluster/pkg/leaderelection" + 
"github.com/loft-sh/vcluster/pkg/server" + "github.com/loft-sh/vcluster/pkg/util/blockingcacheclient" + "github.com/loft-sh/vcluster/pkg/util/clienthelper" + "github.com/loft-sh/vcluster/pkg/util/kubeconfig" + "github.com/loft-sh/vcluster/pkg/util/translate" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "io/ioutil" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/discovery" + "k8s.io/client-go/kubernetes" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/klog" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + "math" + "os" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "time" +) + +var ( + scheme = runtime.NewScheme() +) + +func init() { + _ = clientgoscheme.AddToScheme(scheme) + // API extensions are not in the above scheme set, + // and must thus be added separately. 
+ _ = apiextensionsv1beta1.AddToScheme(scheme) + _ = apiextensionsv1.AddToScheme(scheme) + _ = apiregistrationv1.AddToScheme(scheme) + + // Register the fake conversions + _ = apis.RegisterConversions(scheme) +} + +func NewStartCommand() *cobra.Command { + options := &context2.VirtualClusterOptions{} + cmd := &cobra.Command{ + Use: "start", + Short: "Execute the vcluster", + Args: cobra.NoArgs, + RunE: func(cobraCmd *cobra.Command, args []string) error { + return ExecuteStart(options) + }, + } + + cmd.Flags().StringVar(&options.RequestHeaderCaCert, "request-header-ca-cert", "/data/server/tls/request-header-ca.crt", "The path to the request header ca certificate") + cmd.Flags().StringVar(&options.ClientCaCert, "client-ca-cert", "/data/server/tls/client-ca.crt", "The path to the client ca certificate") + cmd.Flags().StringVar(&options.ServerCaCert, "server-ca-cert", "/data/server/tls/server-ca.crt", "The path to the server ca certificate") + cmd.Flags().StringVar(&options.ServerCaKey, "server-ca-key", "/data/server/tls/server-ca.key", "The path to the server ca key") + cmd.Flags().StringVar(&options.KubeConfig, "kube-config", "/data/server/cred/admin.kubeconfig", "The path to the virtual cluster admin kube config") + cmd.Flags().StringSliceVar(&options.TlsSANs, "tls-san", []string{}, "Add additional hostname or IP as a Subject Alternative Name in the TLS cert") + cmd.Flags().StringVar(&options.DisableSyncResources, "disable-sync-resources", "", "The resources that shouldn't be synced by the virtual cluster (e.g. 
ingresses)") + + cmd.Flags().StringVar(&options.KubeConfigSecret, "out-kube-config-secret", "", "If specified, the virtual cluster will write the generated kube config to the given secret") + cmd.Flags().StringVar(&options.KubeConfigSecretNamespace, "out-kube-config-secret-namespace", "", "If specified, the virtual cluster will write the generated kube config in the given namespace") + cmd.Flags().StringVar(&options.KubeConfigServer, "out-kube-config-server", "", "If specified, the virtual cluster will use this server for the generated kube config (e.g. https://my-vcluster.domain.com)") + + cmd.Flags().StringVar(&options.TargetNamespace, "target-namespace", "", "The namespace to run the virtual cluster in (defaults to current namespace)") + cmd.Flags().StringVar(&options.ServiceName, "service-name", "vcluster", "The service name where the vcluster proxy will be available") + cmd.Flags().StringVar(&options.ServiceNamespace, "service-namespace", "", "The service namespace where the vcluster proxy will be available. 
If empty defaults to the current namespace") + cmd.Flags().BoolVar(&options.SetOwner, "set-owner", false, "If true, will set the same owner the currently running syncer pod has on the synced resources") + cmd.Flags().StringVar(&options.DeprecatedOwningStatefulSet, "owning-statefulset", "", "DEPRECATED: use --set-owner instead") + + cmd.Flags().StringVar(&options.Suffix, "suffix", "vcluster", "The suffix to append to the synced resources in the namespace") + cmd.Flags().StringVar(&options.BindAddress, "bind-address", "0.0.0.0", "The address to bind the server to") + cmd.Flags().IntVar(&options.Port, "port", 8443, "The port to bind to") + + cmd.Flags().BoolVar(&options.SyncAllNodes, "sync-all-nodes", false, "If enabled and --fake-nodes is false, the virtual cluster will sync all nodes instead of only the needed ones") + cmd.Flags().BoolVar(&options.SyncNodeChanges, "sync-node-changes", false, "If enabled and --fake-nodes is false, the virtual cluster will proxy node updates from the virtual cluster to the host cluster. 
This is not recommended and should only be used if you know what you are doing.") + cmd.Flags().BoolVar(&options.UseFakeKubelets, "fake-kubelets", true, "If enabled, the virtual cluster will create fake kubelet endpoints to support metrics-servers") + + cmd.Flags().BoolVar(&options.UseFakeNodes, "fake-nodes", true, "If enabled, the virtual cluster will create fake nodes instead of copying the actual physical nodes config") + cmd.Flags().BoolVar(&options.UseFakePersistentVolumes, "fake-persistent-volumes", true, "If enabled, the virtual cluster will create fake persistent volumes instead of copying the actual physical persistent volumes config") + + cmd.Flags().BoolVar(&options.EnableStorageClasses, "enable-storage-classes", false, "If enabled, the virtual cluster will sync storage classes") + cmd.Flags().BoolVar(&options.EnablePriorityClasses, "enable-priority-classes", false, "If enabled, the virtual cluster will sync priority classes from and to the host cluster") + + cmd.Flags().StringSliceVar(&options.TranslateImages, "translate-image", []string{}, "Translates image names from the virtual pod to the physical pod (e.g. coredns/coredns=mirror.io/coredns/coredns)") + cmd.Flags().BoolVar(&options.EnforceNodeSelector, "enforce-node-selector", true, "If enabled and --node-selector is set then the virtual cluster will ensure that no pods are scheduled outside of the node selector") + cmd.Flags().StringVar(&options.NodeSelector, "node-selector", "", "If set, nodes with the given node selector will be synced to the virtual cluster. 
This will implicitly set --fake-nodes=false") + cmd.Flags().StringVar(&options.ServiceAccount, "service-account", "", "If set, will set this host service account on the synced pods") + + cmd.Flags().BoolVar(&options.OverrideHosts, "override-hosts", true, "If enabled, vcluster will override a containers /etc/hosts file if there is a subdomain specified for the pod (spec.subdomain).") + cmd.Flags().StringVar(&options.OverrideHostsContainerImage, "override-hosts-container-image", translatepods.HostsRewriteImage, "The image for the init container that is used for creating the override hosts file.") + + cmd.Flags().StringVar(&options.ClusterDomain, "cluster-domain", "cluster.local", "The cluster domain ending that should be used for the virtual cluster") + cmd.Flags().Int64Var(&options.LeaseDuration, "lease-duration", 60, "Lease duration of the leader election in seconds") + cmd.Flags().Int64Var(&options.RenewDeadline, "renew-deadline", 40, "Renew deadline of the leader election in seconds") + cmd.Flags().Int64Var(&options.RetryPeriod, "retry-period", 15, "Retry period of the leader election in seconds") + return cmd +} + +func ExecuteStart(options *context2.VirtualClusterOptions) error { + // wait until kube config is available + var clientConfig clientcmd.ClientConfig + err := wait.Poll(time.Second, time.Minute*10, func() (bool, error) { + out, err := ioutil.ReadFile(options.KubeConfig) + if err != nil { + if os.IsNotExist(err) { + klog.Info("couldn't find virtual cluster kube-config, will retry in 1 seconds") + return false, nil + } + + return false, err + } + + // parse virtual cluster config + clientConfig, err = clientcmd.NewClientConfigFromBytes(out) + if err != nil { + return false, errors.Wrap(err, "read kube config") + } + + restConfig, err := clientConfig.ClientConfig() + if err != nil { + return false, errors.Wrap(err, "read kube client config") + } + + kubeClient, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return false, errors.Wrap(err, 
"create kube client") + } + + _, err = kubeClient.Discovery().ServerVersion() + if err != nil { + klog.Infof("couldn't retrieve virtual cluster version (%v), will retry in 1 seconds", err) + return false, nil + } + _, err = kubeClient.CoreV1().ServiceAccounts("default").Get(context.Background(), "default", metav1.GetOptions{}) + if err != nil { + klog.Infof("default ServiceAccount is not available yet, will retry in 1 seconds") + return false, nil + } + + return true, nil + }) + if err != nil { + return err + } + + // set suffix + translate.Suffix = options.Suffix + if translate.Suffix == "" { + return fmt.Errorf("suffix cannot be empty") + } + + // set kubelet port + nodeservice.KubeletTargetPort = options.Port + + // get current namespace + currentNamespace, err := clienthelper.CurrentNamespace() + if err != nil { + return err + } + + // ensure target namespace + if options.TargetNamespace == "" { + options.TargetNamespace = currentNamespace + } + + // set service namespace + if options.ServiceNamespace == "" { + options.ServiceNamespace = currentNamespace + } + + rawConfig, err := clientConfig.RawConfig() + if err != nil { + return err + } + virtualClusterConfig, err := clientConfig.ClientConfig() + if err != nil { + return err + } + inClusterConfig := ctrl.GetConfigOrDie() + + // We increase the limits here so that we don't get any problems + virtualClusterConfig.QPS = 1000 + virtualClusterConfig.Burst = 2000 + virtualClusterConfig.Timeout = 0 + + inClusterConfig.QPS = 40 + inClusterConfig.Burst = 80 + inClusterConfig.Timeout = 0 + + klog.Info("Using physical cluster at " + inClusterConfig.Host) + localManager, err := ctrl.NewManager(inClusterConfig, ctrl.Options{ + Scheme: scheme, + MetricsBindAddress: "0", + LeaderElection: false, + Namespace: options.TargetNamespace, + NewClient: blockingcacheclient.NewCacheClient, + }) + if err != nil { + return err + } + virtualClusterManager, err := ctrl.NewManager(virtualClusterConfig, ctrl.Options{ + Scheme: scheme, + 
 MetricsBindAddress: "0", + LeaderElection: false, + NewClient: blockingcacheclient.NewCacheClient, + }) + if err != nil { + return err + } + + // get virtual cluster version + discoveryClient, err := discovery.NewDiscoveryClientForConfig(virtualClusterConfig) + if err != nil { + return errors.Wrap(err, "create discovery client") + } + serverVersion, err := discoveryClient.ServerVersion() + if err != nil { + return errors.Wrap(err, "get virtual cluster version") + } + nodes.FakeNodesVersion = serverVersion.GitVersion + klog.Infof("Can connect to virtual cluster with version " + serverVersion.GitVersion) + + // setup CoreDNS according to the manifest file + go func() { + wait.ExponentialBackoff(wait.Backoff{Duration: time.Second, Factor: 1.5, Cap: time.Minute, Steps: math.MaxInt32}, func() (bool, error) { + err := coredns.ApplyManifest(*virtualClusterConfig, serverVersion) + if err != nil { + klog.Infof("Failed to apply CoreDNS configuration from the manifest file: %v", err) + return false, nil + } + klog.Infof("CoreDNS configuration from the manifest file applied successfully") + return true, nil + }) + }() + + // create controller context + ctx, err := context2.NewControllerContext(localManager, virtualClusterManager, options) + if err != nil { + return errors.Wrap(err, "create controller context") + } + + // register the indices + err = controllers.RegisterIndices(ctx) + if err != nil { + return errors.Wrap(err, "register controllers") + } + + // start the local manager + go func() { + err := localManager.Start(ctx.Context) + if err != nil { + panic(err) + } + }() + + // start the virtual cluster manager + go func() { + err := virtualClusterManager.Start(ctx.Context) + if err != nil { + panic(err) + } + }() + + // Wait for caches to be synced + localManager.GetCache().WaitForCacheSync(ctx.Context) + virtualClusterManager.GetCache().WaitForCacheSync(ctx.Context) + + // start leader election for controllers + go func() { + err = leaderelection.StartLeaderElection(ctx, 
scheme, func() error { + localClient, err := client.New(ctx.LocalManager.GetConfig(), client.Options{Scheme: ctx.LocalManager.GetScheme()}) + if err != nil { + return err + } + + // make sure owner is set if it is there + err = findOwner(ctx, localClient) + if err != nil { + return errors.Wrap(err, "set owner") + } + + // make sure the kubernetes service is synced + err = syncKubernetesService(ctx, localClient) + if err != nil { + return errors.Wrap(err, "sync kubernetes service") + } + + // start the node service provider + go func() { + ctx.NodeServiceProvider.Start(ctx.Context) + }() + + // register controllers + err = controllers.RegisterControllers(ctx) + if err != nil { + return err + } + + // write the kube config to secret + err = writeKubeConfigToSecret(ctx, &rawConfig) + if err != nil { + return err + } + + return nil + }) + if err != nil { + klog.Fatalf("Error starting leader election: %v", err) + } + }() + + // start the proxy + proxyServer, err := server.NewServer(ctx, options.RequestHeaderCaCert, options.ClientCaCert) + if err != nil { + return err + } + + // start the proxy server in secure mode + err = proxyServer.ServeOnListenerTLS(options.BindAddress, options.Port, ctx.StopChan) + if err != nil { + return err + } + + return nil +} + +func findOwner(ctx *context2.ControllerContext, localClient client.Client) error { + if ctx.Options.SetOwner { + // get current pod + podName, err := os.Hostname() + if err != nil { + klog.Errorf("Couldn't find current hostname: %v, will skip setting owner", err) + return nil // ignore error here + } + + pod := &corev1.Pod{} + err = localClient.Get(ctx.Context, types.NamespacedName{Namespace: ctx.Options.TargetNamespace, Name: podName}, pod) + if err != nil { + if kerrors.IsNotFound(err) { + klog.Errorf("Couldn't find current pod: %v, will skip setting owner", err) + return nil + } + + return errors.Wrap(err, "get owning pod") + } + + // check owner of pod + controller := metav1.GetControllerOf(pod) + if controller == 
nil { + klog.Errorf("No controller for pod %s/%s found, will skip setting owner", pod.Namespace, pod.Name) + return nil + } else if controller.APIVersion != appsv1.SchemeGroupVersion.String() || (controller.Kind != "ReplicaSet" && controller.Kind != "StatefulSet") { + klog.Errorf("Unsupported owner kind %s and apiVersion %s, will skip setting owner", controller.Kind, controller.APIVersion) + return nil + } + + // statefulset + if controller.Kind == "StatefulSet" { + statefulSet := &appsv1.StatefulSet{} + err = localClient.Get(ctx.Context, types.NamespacedName{Namespace: pod.Namespace, Name: controller.Name}, statefulSet) + if err != nil { + return errors.Wrap(err, "get owning stateful set") + } + + statefulSet.APIVersion = appsv1.SchemeGroupVersion.String() + statefulSet.Kind = "StatefulSet" + translate.Owner = statefulSet + return nil + } + + // replicaset + replicaSet := &appsv1.ReplicaSet{} + err = localClient.Get(ctx.Context, types.NamespacedName{Namespace: pod.Namespace, Name: controller.Name}, replicaSet) + if err != nil { + return errors.Wrap(err, "get owning replica set") + } + + // check owner of replica set + replicaSetController := metav1.GetControllerOf(replicaSet) + if controller == nil || replicaSetController.APIVersion != appsv1.SchemeGroupVersion.String() || replicaSetController.Kind != "Deployment" { + replicaSet.APIVersion = appsv1.SchemeGroupVersion.String() + replicaSet.Kind = "ReplicaSet" + translate.Owner = replicaSet + return nil + } + + // deployment + deployment := &appsv1.Deployment{} + err = localClient.Get(ctx.Context, types.NamespacedName{Namespace: pod.Namespace, Name: replicaSetController.Name}, deployment) + if err != nil { + return errors.Wrap(err, "get owning deployment") + } + + deployment.APIVersion = appsv1.SchemeGroupVersion.String() + deployment.Kind = "Deployment" + translate.Owner = deployment + return nil + } else if ctx.Options.DeprecatedOwningStatefulSet != "" { + statefulSet := &appsv1.StatefulSet{} + err := 
localClient.Get(ctx.Context, types.NamespacedName{Namespace: ctx.Options.TargetNamespace, Name: ctx.Options.DeprecatedOwningStatefulSet}, statefulSet) + if err != nil { + return errors.Wrap(err, "get owning statefulset") + } + + if statefulSet.Namespace == ctx.Options.TargetNamespace { + translate.Owner = statefulSet + } + } + + return nil +} + +func syncKubernetesService(ctx *context2.ControllerContext, localClient client.Client) error { + virtualClient, err := client.New(ctx.VirtualManager.GetConfig(), client.Options{Scheme: ctx.VirtualManager.GetScheme()}) + if err != nil { + return err + } + + err = services.SyncKubernetesService(ctx.Context, localClient, virtualClient, ctx.Options.ServiceNamespace, ctx.Options.ServiceName) + if err != nil { + return errors.Wrap(err, "sync kubernetes service") + } + + err = endpoints.SyncKubernetesServiceEndpoints(ctx.Context, localClient, virtualClient, ctx.Options.ServiceNamespace, ctx.Options.ServiceName) + if err != nil { + return errors.Wrap(err, "sync kubernetes service endpoints") + } + + return nil +} + +func writeKubeConfigToSecret(ctx *context2.ControllerContext, config *api.Config) error { + config = config.DeepCopy() + + // exchange kube config server & resolve certificate + for i := range config.Clusters { + // fill in data + if config.Clusters[i].CertificateAuthorityData == nil && config.Clusters[i].CertificateAuthority != "" { + o, err := ioutil.ReadFile(config.Clusters[i].CertificateAuthority) + if err != nil { + return err + } + + config.Clusters[i].CertificateAuthority = "" + config.Clusters[i].CertificateAuthorityData = o + } + + if ctx.Options.KubeConfigServer != "" { + config.Clusters[i].Server = ctx.Options.KubeConfigServer + } else { + config.Clusters[i].Server = fmt.Sprintf("https://localhost:%d", ctx.Options.Port) + } + } + + // resolve auth info cert & key + for i := range config.AuthInfos { + // fill in data + if config.AuthInfos[i].ClientCertificateData == nil && config.AuthInfos[i].ClientCertificate 
!= "" { + o, err := ioutil.ReadFile(config.AuthInfos[i].ClientCertificate) + if err != nil { + return err + } + + config.AuthInfos[i].ClientCertificate = "" + config.AuthInfos[i].ClientCertificateData = o + } + if config.AuthInfos[i].ClientKeyData == nil && config.AuthInfos[i].ClientKey != "" { + o, err := ioutil.ReadFile(config.AuthInfos[i].ClientKey) + if err != nil { + return err + } + + config.AuthInfos[i].ClientKey = "" + config.AuthInfos[i].ClientKeyData = o + } + } + + // we have to create a new client here, because the cached version will always say + // the secret does not exist in another namespace + localClient, err := client.New(ctx.LocalManager.GetConfig(), client.Options{ + Scheme: ctx.LocalManager.GetScheme(), + Mapper: ctx.LocalManager.GetRESTMapper(), + }) + if err != nil { + return errors.Wrap(err, "create uncached client") + } + + // check if we need to write the kubeconfig secret to the default location as well + if ctx.Options.KubeConfigSecret != "" { + // which namespace should we create the additional secret in? 
+ secretNamespace := ctx.Options.KubeConfigSecretNamespace + if secretNamespace == "" { + secretNamespace = ctx.Options.TargetNamespace + } + err = kubeconfig.WriteKubeConfig(ctx.Context, localClient, ctx.Options.KubeConfigSecret, secretNamespace, config) + if err != nil { + return fmt.Errorf("creating %s secret in the %s ns failed: %v", ctx.Options.KubeConfigSecret, secretNamespace, err) + } + } + currentNamespace, err := clienthelper.CurrentNamespace() + if err != nil { + return err + } + // write the default Secret + return kubeconfig.WriteKubeConfig(ctx.Context, localClient, kubeconfig.GetDefaultSecretName(ctx.Options.Suffix), currentNamespace, config) +} diff --git a/cmd/vcluster/main.go b/cmd/vcluster/main.go index 764d7b7fb..928e80ed1 100644 --- a/cmd/vcluster/main.go +++ b/cmd/vcluster/main.go @@ -1,51 +1,12 @@ package main import ( - "context" - "fmt" - "io/ioutil" - "math" - "os" - "time" - - context2 "github.com/loft-sh/vcluster/cmd/vcluster/context" - "github.com/loft-sh/vcluster/pkg/apis" - "github.com/loft-sh/vcluster/pkg/controllers" - "github.com/loft-sh/vcluster/pkg/controllers/resources/endpoints" - "github.com/loft-sh/vcluster/pkg/controllers/resources/nodes" - "github.com/loft-sh/vcluster/pkg/controllers/resources/nodes/nodeservice" - translatepods "github.com/loft-sh/vcluster/pkg/controllers/resources/pods/translate" - "github.com/loft-sh/vcluster/pkg/controllers/resources/services" - "github.com/loft-sh/vcluster/pkg/coredns" - "github.com/loft-sh/vcluster/pkg/leaderelection" - "github.com/loft-sh/vcluster/pkg/server" - "github.com/loft-sh/vcluster/pkg/util/blockingcacheclient" - "github.com/loft-sh/vcluster/pkg/util/clienthelper" - "github.com/loft-sh/vcluster/pkg/util/kubeconfig" + "github.com/loft-sh/vcluster/cmd/vcluster/cmd" "github.com/loft-sh/vcluster/pkg/util/log" - "github.com/loft-sh/vcluster/pkg/util/translate" - - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apiextensionsv1 
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/discovery" - "k8s.io/client-go/kubernetes" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/clientcmd/api" "k8s.io/klog" - apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + "os" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - // "go.uber.org/zap/zapcore" // zappkg "go.uber.org/zap" @@ -55,86 +16,8 @@ import ( _ "github.com/go-openapi/loads" _ "k8s.io/apimachinery/pkg/apis/meta/v1" _ "k8s.io/client-go/plugin/pkg/client/auth" // Enable cloud provider auth - - "github.com/spf13/cobra" -) - -var ( - scheme = runtime.NewScheme() ) -func init() { - _ = clientgoscheme.AddToScheme(scheme) - // API extensions are not in the above scheme set, - // and must thus be added separately. 
- _ = apiextensionsv1beta1.AddToScheme(scheme) - _ = apiextensionsv1.AddToScheme(scheme) - _ = apiregistrationv1.AddToScheme(scheme) - - // Register the fake conversions - _ = apis.RegisterConversions(scheme) -} - -func NewCommand() *cobra.Command { - options := &context2.VirtualClusterOptions{} - cmd := &cobra.Command{ - Use: "vcluster", - SilenceUsage: true, - SilenceErrors: true, - Short: "Welcome to Virtual Cluster!", - Args: cobra.NoArgs, - RunE: func(cobraCmd *cobra.Command, args []string) error { - return Execute(options) - }, - } - - cmd.Flags().StringVar(&options.RequestHeaderCaCert, "request-header-ca-cert", "/data/server/tls/request-header-ca.crt", "The path to the request header ca certificate") - cmd.Flags().StringVar(&options.ClientCaCert, "client-ca-cert", "/data/server/tls/client-ca.crt", "The path to the client ca certificate") - cmd.Flags().StringVar(&options.ServerCaCert, "server-ca-cert", "/data/server/tls/server-ca.crt", "The path to the server ca certificate") - cmd.Flags().StringVar(&options.ServerCaKey, "server-ca-key", "/data/server/tls/server-ca.key", "The path to the server ca key") - cmd.Flags().StringVar(&options.KubeConfig, "kube-config", "/data/server/cred/admin.kubeconfig", "The path to the virtual cluster admin kube config") - cmd.Flags().StringSliceVar(&options.TlsSANs, "tls-san", []string{}, "Add additional hostname or IP as a Subject Alternative Name in the TLS cert") - cmd.Flags().StringVar(&options.DisableSyncResources, "disable-sync-resources", "", "The resources that shouldn't be synced by the virtual cluster (e.g. 
ingresses)") - - cmd.Flags().StringVar(&options.KubeConfigSecret, "out-kube-config-secret", "", "If specified, the virtual cluster will write the generated kube config to the given secret") - cmd.Flags().StringVar(&options.KubeConfigSecretNamespace, "out-kube-config-secret-namespace", "", "If specified, the virtual cluster will write the generated kube config in the given namespace") - cmd.Flags().StringVar(&options.KubeConfigServer, "out-kube-config-server", "", "If specified, the virtual cluster will use this server for the generated kube config (e.g. https://my-vcluster.domain.com)") - - cmd.Flags().StringVar(&options.TargetNamespace, "target-namespace", "", "The namespace to run the virtual cluster in (defaults to current namespace)") - cmd.Flags().StringVar(&options.ServiceName, "service-name", "vcluster", "The service name where the vcluster proxy will be available") - cmd.Flags().StringVar(&options.ServiceNamespace, "service-namespace", "", "The service namespace where the vcluster proxy will be available. 
If empty defaults to the current namespace") - cmd.Flags().BoolVar(&options.SetOwner, "set-owner", false, "If true, will set the same owner the currently running syncer pod has on the synced resources") - cmd.Flags().StringVar(&options.DeprecatedOwningStatefulSet, "owning-statefulset", "", "DEPRECATED: use --set-owner instead") - - cmd.Flags().StringVar(&options.Suffix, "suffix", "vcluster", "The suffix to append to the synced resources in the namespace") - cmd.Flags().StringVar(&options.BindAddress, "bind-address", "0.0.0.0", "The address to bind the server to") - cmd.Flags().IntVar(&options.Port, "port", 8443, "The port to bind to") - - cmd.Flags().BoolVar(&options.SyncAllNodes, "sync-all-nodes", false, "If enabled and --fake-nodes is false, the virtual cluster will sync all nodes instead of only the needed ones") - cmd.Flags().BoolVar(&options.SyncNodeChanges, "sync-node-changes", false, "If enabled and --fake-nodes is false, the virtual cluster will proxy node updates from the virtual cluster to the host cluster. 
This is not recommended and should only be used if you know what you are doing.") - cmd.Flags().BoolVar(&options.UseFakeKubelets, "fake-kubelets", true, "If enabled, the virtual cluster will create fake kubelet endpoints to support metrics-servers") - - cmd.Flags().BoolVar(&options.UseFakeNodes, "fake-nodes", true, "If enabled, the virtual cluster will create fake nodes instead of copying the actual physical nodes config") - cmd.Flags().BoolVar(&options.UseFakePersistentVolumes, "fake-persistent-volumes", true, "If enabled, the virtual cluster will create fake persistent volumes instead of copying the actual physical persistent volumes config") - - cmd.Flags().BoolVar(&options.EnableStorageClasses, "enable-storage-classes", false, "If enabled, the virtual cluster will sync storage classes") - cmd.Flags().BoolVar(&options.EnablePriorityClasses, "enable-priority-classes", false, "If enabled, the virtual cluster will sync priority classes from and to the host cluster") - - cmd.Flags().StringSliceVar(&options.TranslateImages, "translate-image", []string{}, "Translates image names from the virtual pod to the physical pod (e.g. coredns/coredns=mirror.io/coredns/coredns)") - cmd.Flags().BoolVar(&options.EnforceNodeSelector, "enforce-node-selector", true, "If enabled and --node-selector is set then the virtual cluster will ensure that no pods are scheduled outside of the node selector") - cmd.Flags().StringVar(&options.NodeSelector, "node-selector", "", "If set, nodes with the given node selector will be synced to the virtual cluster. 
This will implicitly set --fake-nodes=false") - cmd.Flags().StringVar(&options.ServiceAccount, "service-account", "", "If set, will set this host service account on the synced pods") - - cmd.Flags().BoolVar(&options.OverrideHosts, "override-hosts", true, "If enabled, vcluster will override a containers /etc/hosts file if there is a subdomain specified for the pod (spec.subdomain).") - cmd.Flags().StringVar(&options.OverrideHostsContainerImage, "override-hosts-container-image", translatepods.HostsRewriteImage, "The image for the init container that is used for creating the override hosts file.") - - cmd.Flags().StringVar(&options.ClusterDomain, "cluster-domain", "cluster.local", "The cluster domain ending that should be used for the virtual cluster") - cmd.Flags().Int64Var(&options.LeaseDuration, "lease-duration", 60, "Lease duration of the leader election in seconds") - cmd.Flags().Int64Var(&options.RenewDeadline, "renew-deadline", 40, "Renew deadline of the leader election in seconds") - cmd.Flags().Int64Var(&options.RetryPeriod, "retry-period", 15, "Retry period of the leader election in seconds") - return cmd -} - func main() { // set global logger if os.Getenv("DEBUG") == "true" { @@ -144,417 +27,8 @@ func main() { } // create a new command and execute - err := NewCommand().Execute() + err := cmd.BuildRoot().Execute() if err != nil { klog.Fatal(err) } } - -func Execute(options *context2.VirtualClusterOptions) error { - // wait until kube config is available - var clientConfig clientcmd.ClientConfig - err := wait.Poll(time.Second, time.Minute*10, func() (bool, error) { - out, err := ioutil.ReadFile(options.KubeConfig) - if err != nil { - if os.IsNotExist(err) { - klog.Info("couldn't find virtual cluster kube-config, will retry in 1 seconds") - return false, nil - } - - return false, err - } - - // parse virtual cluster config - clientConfig, err = clientcmd.NewClientConfigFromBytes(out) - if err != nil { - return false, errors.Wrap(err, "read kube config") - } - 
- restConfig, err := clientConfig.ClientConfig() - if err != nil { - return false, errors.Wrap(err, "read kube client config") - } - - kubeClient, err := kubernetes.NewForConfig(restConfig) - if err != nil { - return false, errors.Wrap(err, "create kube client") - } - - _, err = kubeClient.Discovery().ServerVersion() - if err != nil { - klog.Infof("couldn't retrieve virtual cluster version (%v), will retry in 1 seconds", err) - return false, nil - } - _, err = kubeClient.CoreV1().ServiceAccounts("default").Get(context.Background(), "default", metav1.GetOptions{}) - if err != nil { - klog.Infof("default ServiceAccount is not available yet, will retry in 1 seconds") - return false, nil - } - - return true, nil - }) - if err != nil { - return err - } - - // set suffix - translate.Suffix = options.Suffix - if translate.Suffix == "" { - return fmt.Errorf("suffix cannot be empty") - } - - // set kubelet port - nodeservice.KubeletTargetPort = options.Port - - // get current namespace - currentNamespace, err := clienthelper.CurrentNamespace() - if err != nil { - return err - } - - // ensure target namespace - if options.TargetNamespace == "" { - options.TargetNamespace = currentNamespace - } - - // set service namespace - if options.ServiceNamespace == "" { - options.ServiceNamespace = currentNamespace - } - - rawConfig, err := clientConfig.RawConfig() - if err != nil { - return err - } - virtualClusterConfig, err := clientConfig.ClientConfig() - if err != nil { - return err - } - inClusterConfig := ctrl.GetConfigOrDie() - - // We increase the limits here so that we don't get any problems - virtualClusterConfig.QPS = 1000 - virtualClusterConfig.Burst = 2000 - virtualClusterConfig.Timeout = 0 - - inClusterConfig.QPS = 40 - inClusterConfig.Burst = 80 - inClusterConfig.Timeout = 0 - - klog.Info("Using physical cluster at " + inClusterConfig.Host) - localManager, err := ctrl.NewManager(inClusterConfig, ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: "0", - 
LeaderElection: false, - Namespace: options.TargetNamespace, - NewClient: blockingcacheclient.NewCacheClient, - }) - if err != nil { - return err - } - virtualClusterManager, err := ctrl.NewManager(virtualClusterConfig, ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: "0", - LeaderElection: false, - NewClient: blockingcacheclient.NewCacheClient, - }) - if err != nil { - return err - } - - // get virtual cluster version - discoveryClient, err := discovery.NewDiscoveryClientForConfig(virtualClusterConfig) - if err != nil { - return errors.Wrap(err, "create discovery client") - } - serverVersion, err := discoveryClient.ServerVersion() - if err != nil { - return errors.Wrap(err, "get virtual cluster version") - } - nodes.FakeNodesVersion = serverVersion.GitVersion - klog.Infof("Can connect to virtual cluster with version " + serverVersion.GitVersion) - - // setup CoreDNS according to the manifest file - go func() { - wait.ExponentialBackoff(wait.Backoff{Duration: time.Second, Factor: 1.5, Cap: time.Minute, Steps: math.MaxInt32}, func() (bool, error) { - err := coredns.ApplyManifest(*virtualClusterConfig, serverVersion) - if err != nil { - klog.Infof("Failed to apply CoreDNS cofiguration from the manifest file: %v", err) - return false, nil - } - klog.Infof("CoreDNS cofiguration from the manifest file applied successfully") - return true, nil - }) - }() - - // create controller context - ctx, err := context2.NewControllerContext(localManager, virtualClusterManager, options) - if err != nil { - return errors.Wrap(err, "create controller context") - } - - // register the indices - err = controllers.RegisterIndices(ctx) - if err != nil { - return errors.Wrap(err, "register controllers") - } - - // start the local manager - go func() { - err := localManager.Start(ctx.Context) - if err != nil { - panic(err) - } - }() - - // start the virtual cluster manager - go func() { - err := virtualClusterManager.Start(ctx.Context) - if err != nil { - panic(err) - } - }() - - // 
Wait for caches to be synced - localManager.GetCache().WaitForCacheSync(ctx.Context) - virtualClusterManager.GetCache().WaitForCacheSync(ctx.Context) - - // start leader election for controllers - go func() { - err = leaderelection.StartLeaderElection(ctx, scheme, func() error { - localClient, err := client.New(ctx.LocalManager.GetConfig(), client.Options{Scheme: ctx.LocalManager.GetScheme()}) - if err != nil { - return err - } - - // make sure owner is set if it is there - err = findOwner(ctx, localClient) - if err != nil { - return errors.Wrap(err, "set owner") - } - - // make sure the kubernetes service is synced - err = syncKubernetesService(ctx, localClient) - if err != nil { - return errors.Wrap(err, "sync kubernetes service") - } - - // start the node service provider - go func() { - ctx.NodeServiceProvider.Start(ctx.Context) - }() - - // register controllers - err = controllers.RegisterControllers(ctx) - if err != nil { - return err - } - - // write the kube config to secret - err = writeKubeConfigToSecret(ctx, &rawConfig) - if err != nil { - return err - } - - return nil - }) - if err != nil { - klog.Fatalf("Error starting leader election: %v", err) - } - }() - - // start the proxy - proxyServer, err := server.NewServer(ctx, options.RequestHeaderCaCert, options.ClientCaCert) - if err != nil { - return err - } - - // start the proxy server in secure mode - err = proxyServer.ServeOnListenerTLS(options.BindAddress, options.Port, ctx.StopChan) - if err != nil { - return err - } - - return nil -} - -func findOwner(ctx *context2.ControllerContext, localClient client.Client) error { - if ctx.Options.SetOwner { - // get current pod - podName, err := os.Hostname() - if err != nil { - klog.Errorf("Couldn't find current hostname: %v, will skip setting owner", err) - return nil // ignore error here - } - - pod := &corev1.Pod{} - err = localClient.Get(ctx.Context, types.NamespacedName{Namespace: ctx.Options.TargetNamespace, Name: podName}, pod) - if err != nil { - if 
kerrors.IsNotFound(err) { - klog.Errorf("Couldn't find current pod: %v, will skip setting owner", err) - return nil - } - - return errors.Wrap(err, "get owning pod") - } - - // check owner of pod - controller := metav1.GetControllerOf(pod) - if controller == nil { - klog.Errorf("No controller for pod %s/%s found, will skip setting owner", pod.Namespace, pod.Name) - return nil - } else if controller.APIVersion != appsv1.SchemeGroupVersion.String() || (controller.Kind != "ReplicaSet" && controller.Kind != "StatefulSet") { - klog.Errorf("Unsupported owner kind %s and apiVersion %s, will skip setting owner", controller.Kind, controller.APIVersion) - return nil - } - - // statefulset - if controller.Kind == "StatefulSet" { - statefulSet := &appsv1.StatefulSet{} - err = localClient.Get(ctx.Context, types.NamespacedName{Namespace: pod.Namespace, Name: controller.Name}, statefulSet) - if err != nil { - return errors.Wrap(err, "get owning stateful set") - } - - statefulSet.APIVersion = appsv1.SchemeGroupVersion.String() - statefulSet.Kind = "StatefulSet" - translate.Owner = statefulSet - return nil - } - - // replicaset - replicaSet := &appsv1.ReplicaSet{} - err = localClient.Get(ctx.Context, types.NamespacedName{Namespace: pod.Namespace, Name: controller.Name}, replicaSet) - if err != nil { - return errors.Wrap(err, "get owning replica set") - } - - // check owner of replica set - replicaSetController := metav1.GetControllerOf(replicaSet) - if controller == nil || replicaSetController.APIVersion != appsv1.SchemeGroupVersion.String() || replicaSetController.Kind != "Deployment" { - replicaSet.APIVersion = appsv1.SchemeGroupVersion.String() - replicaSet.Kind = "ReplicaSet" - translate.Owner = replicaSet - return nil - } - - // deployment - deployment := &appsv1.Deployment{} - err = localClient.Get(ctx.Context, types.NamespacedName{Namespace: pod.Namespace, Name: replicaSetController.Name}, deployment) - if err != nil { - return errors.Wrap(err, "get owning deployment") - } - 
- deployment.APIVersion = appsv1.SchemeGroupVersion.String() - deployment.Kind = "Deployment" - translate.Owner = deployment - return nil - } else if ctx.Options.DeprecatedOwningStatefulSet != "" { - statefulSet := &appsv1.StatefulSet{} - err := localClient.Get(ctx.Context, types.NamespacedName{Namespace: ctx.Options.TargetNamespace, Name: ctx.Options.DeprecatedOwningStatefulSet}, statefulSet) - if err != nil { - return errors.Wrap(err, "get owning statefulset") - } - - if statefulSet.Namespace == ctx.Options.TargetNamespace { - translate.Owner = statefulSet - } - } - - return nil -} - -func syncKubernetesService(ctx *context2.ControllerContext, localClient client.Client) error { - virtualClient, err := client.New(ctx.VirtualManager.GetConfig(), client.Options{Scheme: ctx.VirtualManager.GetScheme()}) - if err != nil { - return err - } - - err = services.SyncKubernetesService(ctx.Context, localClient, virtualClient, ctx.Options.ServiceNamespace, ctx.Options.ServiceName) - if err != nil { - return errors.Wrap(err, "sync kubernetes service") - } - - err = endpoints.SyncKubernetesServiceEndpoints(ctx.Context, localClient, virtualClient, ctx.Options.ServiceNamespace, ctx.Options.ServiceName) - if err != nil { - return errors.Wrap(err, "sync kubernetes service endpoints") - } - - return nil -} - -func writeKubeConfigToSecret(ctx *context2.ControllerContext, config *api.Config) error { - config = config.DeepCopy() - - // exchange kube config server & resolve certificate - for i := range config.Clusters { - // fill in data - if config.Clusters[i].CertificateAuthorityData == nil && config.Clusters[i].CertificateAuthority != "" { - o, err := ioutil.ReadFile(config.Clusters[i].CertificateAuthority) - if err != nil { - return err - } - - config.Clusters[i].CertificateAuthority = "" - config.Clusters[i].CertificateAuthorityData = o - } - - if ctx.Options.KubeConfigServer != "" { - config.Clusters[i].Server = ctx.Options.KubeConfigServer - } else { - config.Clusters[i].Server = 
fmt.Sprintf("https://localhost:%d", ctx.Options.Port) - } - } - - // resolve auth info cert & key - for i := range config.AuthInfos { - // fill in data - if config.AuthInfos[i].ClientCertificateData == nil && config.AuthInfos[i].ClientCertificate != "" { - o, err := ioutil.ReadFile(config.AuthInfos[i].ClientCertificate) - if err != nil { - return err - } - - config.AuthInfos[i].ClientCertificate = "" - config.AuthInfos[i].ClientCertificateData = o - } - if config.AuthInfos[i].ClientKeyData == nil && config.AuthInfos[i].ClientKey != "" { - o, err := ioutil.ReadFile(config.AuthInfos[i].ClientKey) - if err != nil { - return err - } - - config.AuthInfos[i].ClientKey = "" - config.AuthInfos[i].ClientKeyData = o - } - } - - // we have to create a new client here, because the cached version will always say - // the secret does not exist in another namespace - localClient, err := client.New(ctx.LocalManager.GetConfig(), client.Options{ - Scheme: ctx.LocalManager.GetScheme(), - Mapper: ctx.LocalManager.GetRESTMapper(), - }) - if err != nil { - return errors.Wrap(err, "create uncached client") - } - - // check if we need to write the kubeconfig secrete to the default location as well - if ctx.Options.KubeConfigSecret != "" { - // which namespace should we create the additional secret in? 
- secretNamespace := ctx.Options.KubeConfigSecretNamespace - if secretNamespace == "" { - secretNamespace = ctx.Options.TargetNamespace - } - err = kubeconfig.WriteKubeConfig(ctx.Context, localClient, ctx.Options.KubeConfigSecret, secretNamespace, config) - if err != nil { - return fmt.Errorf("creating %s secret in the %s ns failed: %v", ctx.Options.KubeConfigSecret, secretNamespace, err) - } - } - currentNamespace, err := clienthelper.CurrentNamespace() - if err != nil { - return err - } - // write the default Secret - return kubeconfig.WriteKubeConfig(ctx.Context, localClient, kubeconfig.GetDefaultSecretName(ctx.Options.Suffix), currentNamespace, config) -} diff --git a/cmd/vclusterctl/cmd/app/create/types.go b/cmd/vclusterctl/cmd/app/create/types.go new file mode 100644 index 000000000..463eed5bb --- /dev/null +++ b/cmd/vclusterctl/cmd/app/create/types.go @@ -0,0 +1,23 @@ +package create + +// CreateOptions holds the create cmd options +type CreateOptions struct { + ChartVersion string + ChartName string + ChartRepo string + K3SImage string + Distro string + CIDR string + ExtraValues []string + + CreateNamespace bool + DisableIngressSync bool + CreateClusterRole bool + Expose bool + Connect bool + Upgrade bool + + RunAsUser int64 + + ReleaseValues string +} diff --git a/cmd/vclusterctl/cmd/app/create/values/default.go b/cmd/vclusterctl/cmd/app/create/values/default.go new file mode 100644 index 000000000..d7ce3f22c --- /dev/null +++ b/cmd/vclusterctl/cmd/app/create/values/default.go @@ -0,0 +1,41 @@ +package values + +import ( + "errors" + "fmt" + "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/create" + "github.com/loft-sh/vcluster/cmd/vclusterctl/log" + "k8s.io/client-go/kubernetes" + "strings" +) + +var AllowedDistros = []string{"k3s", "k0s", "vanilla"} + +func GetDefaultReleaseValues(client kubernetes.Interface, createOptions *create.CreateOptions, log log.Logger) (string, error) { + if !contains(createOptions.Distro, AllowedDistros) { + return "", 
fmt.Errorf("unsupported distro %s, please select one of: %s", createOptions.Distro, strings.Join(AllowedDistros, ", ")) + } + + // set correct chart name + if createOptions.ChartName == "vcluster" && createOptions.Distro != "k3s" { + createOptions.ChartName += "-" + createOptions.Distro + } + + // now get the default values for the distro + if createOptions.Distro == "k3s" { + return getDefaultK3SReleaseValues(client, createOptions, log) + } else if createOptions.Distro == "k0s" { + return getDefaultK0SReleaseValues(client, createOptions, log) + } + + return "", errors.New("unrecognized distro " + createOptions.Distro) +} + +func contains(needle string, haystack []string) bool { + for _, n := range haystack { + if needle == n { + return true + } + } + return false +} diff --git a/cmd/vclusterctl/cmd/app/create/values/k0s.go b/cmd/vclusterctl/cmd/app/create/values/k0s.go new file mode 100644 index 000000000..b20f4e494 --- /dev/null +++ b/cmd/vclusterctl/cmd/app/create/values/k0s.go @@ -0,0 +1,45 @@ +package values + +import ( + "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/create" + "github.com/loft-sh/vcluster/cmd/vclusterctl/log" + "k8s.io/client-go/kubernetes" + "strings" +) + +var K0SVersionMap = map[string]string{ + "1.22": "k0sproject/k0s:v1.22.4-k0s.0", + "1.21": "k0sproject/k0s:v1.21.7-k0s.0", + "1.20": "k0sproject/k0s:v1.20.6-k0s.0", +} + +func getDefaultK0SReleaseValues(client kubernetes.Interface, createOptions *create.CreateOptions, log log.Logger) (string, error) { + image := createOptions.K3SImage + if image == "" { + serverVersionString, serverMinorInt, err := getKubernetesVersion(client) + if err != nil { + return "", err + } + + var ok bool + image, ok = K0SVersionMap[serverVersionString] + if !ok { + if serverMinorInt > 22 { + log.Infof("officially unsupported host server version %s, will fallback to virtual cluster version v1.22", serverVersionString) + image = K0SVersionMap["1.22"] + serverVersionString = "1.22" + } else { + 
log.Infof("officially unsupported host server version %s, will fallback to virtual cluster version v1.20", serverVersionString) + image = K0SVersionMap["1.20"] + serverVersionString = "1.20" + } + } + } + + // build values + values := `vcluster: + image: ##IMAGE## +` + values = strings.ReplaceAll(values, "##IMAGE##", image) + return addCommonReleaseValues(values, createOptions) +} diff --git a/cmd/vclusterctl/cmd/app/create/values/k3s.go b/cmd/vclusterctl/cmd/app/create/values/k3s.go new file mode 100644 index 000000000..a35bcf629 --- /dev/null +++ b/cmd/vclusterctl/cmd/app/create/values/k3s.go @@ -0,0 +1,162 @@ +package values + +import ( + "context" + "fmt" + "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/create" + "github.com/loft-sh/vcluster/cmd/vclusterctl/log" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "regexp" + "strconv" + "strings" +) + +var K3SVersionMap = map[string]string{ + "1.22": "rancher/k3s:v1.22.2-k3s2", + "1.21": "rancher/k3s:v1.21.5-k3s2", + "1.20": "rancher/k3s:v1.20.11-k3s2", + "1.19": "rancher/k3s:v1.19.13-k3s1", + "1.18": "rancher/k3s:v1.18.20-k3s1", + "1.17": "rancher/k3s:v1.17.17-k3s1", + "1.16": "rancher/k3s:v1.16.15-k3s1", +} + +const noDeployValues = ` baseArgs: + - server + - --write-kubeconfig=/k3s-config/kube-config.yaml + - --data-dir=/data + - --no-deploy=traefik,servicelb,metrics-server,local-storage + - --disable-network-policy + - --disable-agent + - --disable-scheduler + - --disable-cloud-controller + - --flannel-backend=none + - --kube-controller-manager-arg=controllers=*,-nodeipam,-nodelifecycle,-persistentvolume-binder,-attachdetach,-persistentvolume-expander,-cloud-node-lifecycle` + +var baseArgsMap = map[string]string{ + "1.17": noDeployValues, + "1.16": noDeployValues, +} + +var replaceRegEx = regexp.MustCompile("[^0-9]+") +var errorMessageFind = "provided IP is not in the valid range. 
The range of valid IPs is " + +func getDefaultK3SReleaseValues(client kubernetes.Interface, createOptions *create.CreateOptions, log log.Logger) (string, error) { + var ( + image = createOptions.K3SImage + serverVersionString string + serverMinorInt int + err error + ) + + if image == "" { + serverVersionString, serverMinorInt, err = getKubernetesVersion(client) + if err != nil { + return "", err + } + + var ok bool + image, ok = K3SVersionMap[serverVersionString] + if !ok { + if serverMinorInt > 22 { + log.Infof("officially unsupported host server version %s, will fallback to virtual cluster version v1.22", serverVersionString) + image = K3SVersionMap["1.22"] + serverVersionString = "1.22" + } else { + log.Infof("officially unsupported host server version %s, will fallback to virtual cluster version v1.16", serverVersionString) + image = K3SVersionMap["1.16"] + serverVersionString = "1.16" + } + } + } + + // build values + values := `vcluster: + image: ##IMAGE## +##BASEARGS## +` + values = strings.ReplaceAll(values, "##IMAGE##", image) + if createOptions.K3SImage == "" { + baseArgs := baseArgsMap[serverVersionString] + values = strings.ReplaceAll(values, "##BASEARGS##", baseArgs) + } + + return addCommonReleaseValues(values, createOptions) +} + +func addCommonReleaseValues(values string, createOptions *create.CreateOptions) (string, error) { + values += ` +serviceCIDR: ##CIDR## +storage: + size: 5Gi` + if createOptions.DisableIngressSync { + values += ` +syncer: + extraArgs: ["--disable-sync-resources=ingresses"]` + } + if createOptions.CreateClusterRole { + values += ` +rbac: + clusterRole: + create: true` + } + + if createOptions.Expose { + values += ` +service: + type: LoadBalancer` + } + + if createOptions.RunAsUser >= 0 { + values += ` +securityContext: + runAsUser: ` + fmt.Sprint(createOptions.RunAsUser) + } + values = strings.ReplaceAll(values, "##CIDR##", createOptions.CIDR) + values = strings.TrimSpace(values) + return values, nil +} + +func 
getKubernetesVersion(client kubernetes.Interface) (string, int, error) { + serverVersion, err := client.Discovery().ServerVersion() + if err != nil { + return "", 0, err + } + + serverVersionString := replaceRegEx.ReplaceAllString(serverVersion.Major, "") + "." + replaceRegEx.ReplaceAllString(serverVersion.Minor, "") + serverMinorInt, err := strconv.Atoi(replaceRegEx.ReplaceAllString(serverVersion.Minor, "")) + if err != nil { + return "", 0, err + } + + return serverVersionString, serverMinorInt, nil +} + +func GetServiceCIDR(client kubernetes.Interface, namespace string) (string, error) { + _, err := client.CoreV1().Services(namespace).Create(context.Background(), &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-service-", + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Port: 80, + }, + }, + ClusterIP: "4.4.4.4", + }, + }, metav1.CreateOptions{}) + if err == nil { + return "", fmt.Errorf("couldn't find cluster service cidr, will fallback to 10.96.0.0/12, however this is probably wrong, please make sure the host cluster service cidr and virtual cluster service cidr match") + } + + errorMessage := err.Error() + idx := strings.Index(errorMessage, errorMessageFind) + if idx == -1 { + return "", fmt.Errorf("couldn't find cluster service cidr (" + errorMessage + "), will fallback to 10.96.0.0/12, however this is probably wrong, please make sure the host cluster service cidr and virtual cluster service cidr match") + } + + return strings.TrimSpace(errorMessage[idx+len(errorMessageFind):]), nil +} diff --git a/cmd/vclusterctl/cmd/create.go b/cmd/vclusterctl/cmd/create.go index a68c0e7d9..22d40f62f 100644 --- a/cmd/vclusterctl/cmd/create.go +++ b/cmd/vclusterctl/cmd/create.go @@ -3,11 +3,11 @@ package cmd import ( "context" "fmt" + "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/create" + "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/create/values" "io/ioutil" "os" "os/exec" - "regexp" - "strconv" "strings" 
"github.com/loft-sh/vcluster/pkg/upgrade" @@ -24,55 +24,10 @@ import ( "k8s.io/client-go/tools/clientcmd" ) -var VersionMap = map[string]string{ - "1.22": "rancher/k3s:v1.22.2-k3s2", - "1.21": "rancher/k3s:v1.21.5-k3s2", - "1.20": "rancher/k3s:v1.20.11-k3s2", - "1.19": "rancher/k3s:v1.19.13-k3s1", - "1.18": "rancher/k3s:v1.18.20-k3s1", - "1.17": "rancher/k3s:v1.17.17-k3s1", - "1.16": "rancher/k3s:v1.16.15-k3s1", -} - -const noDeployValues = ` baseArgs: - - server - - --write-kubeconfig=/k3s-config/kube-config.yaml - - --data-dir=/data - - --no-deploy=traefik,servicelb,metrics-server,local-storage - - --disable-network-policy - - --disable-agent - - --disable-scheduler - - --disable-cloud-controller - - --flannel-backend=none - - --kube-controller-manager-arg=controllers=*,-nodeipam,-nodelifecycle,-persistentvolume-binder,-attachdetach,-persistentvolume-expander,-cloud-node-lifecycle` - -var baseArgsMap = map[string]string{ - "1.17": noDeployValues, - "1.16": noDeployValues, -} - -var errorMessageFind = "provided IP is not in the valid range. The range of valid IPs is " -var replaceRegEx = regexp.MustCompile("[^0-9]+") - // CreateCmd holds the login cmd flags type CreateCmd struct { *flags.GlobalFlags - - ChartVersion string - ChartName string - ChartRepo string - ReleaseValues string - K3SImage string - ExtraValues []string - - CreateNamespace bool - DisableIngressSync bool - CreateClusterRole bool - Expose bool - Connect bool - Upgrade bool - - RunAsUser int64 + create.CreateOptions log log.Logger } @@ -102,15 +57,16 @@ vcluster create test --namespace test // Check for newer version upgrade.PrintNewerVersionWarning() - return cmd.Run(cobraCmd, args) + return cmd.Run(args) }, } cobraCmd.Flags().StringVar(&cmd.ChartVersion, "chart-version", upgrade.GetVersion(), "The virtual cluster chart version to use (e.g. 
v0.4.0)") cobraCmd.Flags().StringVar(&cmd.ChartName, "chart-name", "vcluster", "The virtual cluster chart name to use") cobraCmd.Flags().StringVar(&cmd.ChartRepo, "chart-repo", "https://charts.loft.sh", "The virtual cluster chart repo to use") - cobraCmd.Flags().StringVar(&cmd.ReleaseValues, "release-values", "", "Path where to load the virtual cluster helm release values from") cobraCmd.Flags().StringVar(&cmd.K3SImage, "k3s-image", "", "If specified, use this k3s image version") + cobraCmd.Flags().StringVar(&cmd.Distro, "distro", "k3s", fmt.Sprintf("Kubernetes distro to use for the virtual cluster. Allowed distros: %s", strings.Join(values.AllowedDistros, ", "))) + cobraCmd.Flags().StringVar(&cmd.ReleaseValues, "release-values", "", "DEPRECATED: use --extra-values instead") cobraCmd.Flags().StringSliceVarP(&cmd.ExtraValues, "extra-values", "f", []string{}, "Path where to load extra helm values from") cobraCmd.Flags().BoolVar(&cmd.CreateNamespace, "create-namespace", true, "If true the namespace will be created if it does not exist") cobraCmd.Flags().BoolVar(&cmd.DisableIngressSync, "disable-ingress-sync", false, "If true the virtual cluster will not sync any ingresses") @@ -118,12 +74,12 @@ vcluster create test --namespace test cobraCmd.Flags().BoolVar(&cmd.Expose, "expose", false, "If true will create a load balancer service to expose the vcluster endpoint") cobraCmd.Flags().BoolVar(&cmd.Connect, "connect", false, "If true will run vcluster connect directly after the vcluster was created") cobraCmd.Flags().BoolVar(&cmd.Upgrade, "upgrade", true, "If true will try to upgrade the vcluster instead of failing if it already exists") - cobraCmd.Flags().Int64Var(&cmd.RunAsUser, "run-as-user", 0, "User UID that will be used to run the containers in vcluster pod and vcluster CoreDNS. Set to a non-zero value to run vcluster as non-root user. 
The value must be in a range that is acceptable by your cluster.") + cobraCmd.Flags().Int64Var(&cmd.RunAsUser, "run-as-user", -1, "User UID that will be used to run the containers in vcluster pod and vcluster CoreDNS. Set to a non-zero value to run vcluster as non-root user. The value must be in a range that is acceptable by your cluster.") return cobraCmd } // Run executes the functionality -func (cmd *CreateCmd) Run(cobraCmd *cobra.Command, args []string) error { +func (cmd *CreateCmd) Run(args []string) error { // test for helm helmExecutablePath, err := exec.LookPath("helm") if err != nil { @@ -160,25 +116,24 @@ func (cmd *CreateCmd) Run(cobraCmd *cobra.Command, args []string) error { return err } - namespace, _, err := kubeClientConfig.Namespace() - if err != nil { - return err - } else if namespace == "" { - namespace = "default" - } - if cmd.Namespace != "" { - namespace = cmd.Namespace + if cmd.Namespace == "" { + cmd.Namespace, _, err = kubeClientConfig.Namespace() + if err != nil { + return err + } else if cmd.Namespace == "" { + cmd.Namespace = "default" + } } // make sure namespace exists - _, err = client.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{}) + _, err = client.CoreV1().Namespaces().Get(context.Background(), cmd.Namespace, metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { // try to create the namespace - cmd.log.Infof("Creating namespace %s", namespace) + cmd.log.Infof("Creating namespace %s", cmd.Namespace) _, err = client.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: namespace, + Name: cmd.Namespace, }, }, metav1.CreateOptions{}) if err != nil { @@ -190,33 +145,28 @@ func (cmd *CreateCmd) Run(cobraCmd *cobra.Command, args []string) error { } // get service cidr - cidr, err := getServiceCIDR(client, namespace) - if err != nil { - cmd.log.Warn(err) - cidr = "10.96.0.0/12" - } - - // load the default values - values := "" - if 
cmd.ReleaseValues == "" { - values, err = cmd.getDefaultReleaseValues(cobraCmd, client, cidr, cmd.log) - if err != nil { - return err - } - } else { - byteValues, err := ioutil.ReadFile(cmd.ReleaseValues) + if cmd.CIDR == "" { + cmd.CIDR, err = values.GetServiceCIDR(client, cmd.Namespace) if err != nil { - return errors.Wrap(err, "read release values") + cmd.log.Warn(err) + cmd.CIDR = "10.96.0.0/12" } + } - values = string(byteValues) + // load the default values + chartValues, err := values.GetDefaultReleaseValues(client, &cmd.CreateOptions, cmd.log) + if err != nil { + return err + } + if cmd.ReleaseValues != "" { + cmd.ExtraValues = append(cmd.ExtraValues, cmd.ReleaseValues) } // check if vcluster already exists if cmd.Upgrade == false { - _, err = client.AppsV1().StatefulSets(namespace).Get(context.TODO(), args[0], metav1.GetOptions{}) + _, err = client.AppsV1().StatefulSets(cmd.Namespace).Get(context.TODO(), args[0], metav1.GetOptions{}) if err == nil { - return fmt.Errorf("vcluster %s already exists in namespace %s", args[0], namespace) + return fmt.Errorf("vcluster %s already exists in namespace %s", args[0], cmd.Namespace) } } @@ -238,7 +188,7 @@ func (cmd *CreateCmd) Run(cobraCmd *cobra.Command, args []string) error { } defer os.Remove(tempFile.Name()) - _, err = tempFile.WriteString(strings.Replace(string(out), "##CIDR##", cidr, -1)) + _, err = tempFile.WriteString(strings.Replace(string(out), "##CIDR##", cmd.CIDR, -1)) if err != nil { return errors.Wrap(err, "write values to temp file") } @@ -253,18 +203,18 @@ func (cmd *CreateCmd) Run(cobraCmd *cobra.Command, args []string) error { } // we have to upgrade / install the chart - err = helm.NewClient(&rawConfig, cmd.log).Upgrade(args[0], namespace, helm.UpgradeOptions{ + err = helm.NewClient(&rawConfig, cmd.log).Upgrade(args[0], cmd.Namespace, helm.UpgradeOptions{ Chart: cmd.ChartName, Repo: cmd.ChartRepo, Version: cmd.ChartVersion, - Values: values, + Values: chartValues, ValuesFiles: extraValues, }) if 
err != nil { return err } - cmd.log.Donef("Successfully created virtual cluster %s in namespace %s. Use 'vcluster connect %s --namespace %s' to access the virtual cluster", args[0], namespace, args[0], namespace) + cmd.log.Donef("Successfully created virtual cluster %s in namespace %s. Use 'vcluster connect %s --namespace %s' to access the virtual cluster", args[0], cmd.Namespace, args[0], cmd.Namespace) // check if we should connect to the vcluster if cmd.Connect { @@ -277,106 +227,5 @@ func (cmd *CreateCmd) Run(cobraCmd *cobra.Command, args []string) error { return connectCmd.Connect(args[0]) } - return nil } - -func (cmd *CreateCmd) getDefaultReleaseValues(cobraCmd *cobra.Command, client kubernetes.Interface, cidr string, log log.Logger) (string, error) { - image := cmd.K3SImage - serverVersionString := "" - if image == "" { - serverVersion, err := client.Discovery().ServerVersion() - if err != nil { - return "", err - } - - serverVersionString = replaceRegEx.ReplaceAllString(serverVersion.Major, "") + "." 
+ replaceRegEx.ReplaceAllString(serverVersion.Minor, "") - serverMinorInt, err := strconv.Atoi(replaceRegEx.ReplaceAllString(serverVersion.Minor, "")) - if err != nil { - return "", err - } - - var ok bool - image, ok = VersionMap[serverVersionString] - if !ok { - if serverMinorInt > 22 { - log.Infof("officially unsupported host server version %s, will fallback to virtual cluster version v1.22", serverVersionString) - image = VersionMap["1.22"] - serverVersionString = "1.22" - } else { - log.Infof("officially unsupported host server version %s, will fallback to virtual cluster version v1.16", serverVersionString) - image = VersionMap["1.16"] - serverVersionString = "1.16" - } - } - } - - // build values - values := `vcluster: - image: ##IMAGE## - extraArgs: - - --service-cidr=##CIDR## -##BASEARGS## -storage: - size: 5Gi -` - if cmd.DisableIngressSync { - values += ` -syncer: - extraArgs: ["--disable-sync-resources=ingresses"]` - } - if cmd.CreateClusterRole { - values += ` -rbac: - clusterRole: - create: true` - } - - if cmd.Expose { - values += ` -service: - type: LoadBalancer` - } - - if cobraCmd.Flags().Changed("run-as-user") { - values += ` -securityContext: - runAsUser: ` + fmt.Sprint(cmd.RunAsUser) - } - - values = strings.ReplaceAll(values, "##IMAGE##", image) - values = strings.ReplaceAll(values, "##CIDR##", cidr) - if cmd.K3SImage == "" { - baseArgs := baseArgsMap[serverVersionString] - values = strings.ReplaceAll(values, "##BASEARGS##", baseArgs) - } - values = strings.TrimSpace(values) - return values, nil -} - -func getServiceCIDR(client kubernetes.Interface, namespace string) (string, error) { - _, err := client.CoreV1().Services(namespace).Create(context.Background(), &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "test-service-", - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Port: 80, - }, - }, - ClusterIP: "4.4.4.4", - }, - }, metav1.CreateOptions{}) - if err == nil { - return "", fmt.Errorf("couldn't find 
cluster service cidr, will fallback to 10.96.0.0/12, however this is probably wrong, please make sure the host cluster service cidr and virtual cluster service cidr match") - } - - errorMessage := err.Error() - idx := strings.Index(errorMessage, errorMessageFind) - if idx == -1 { - return "", fmt.Errorf("couldn't find cluster service cidr (" + errorMessage + "), will fallback to 10.96.0.0/12, however this is probably wrong, please make sure the host cluster service cidr and virtual cluster service cidr match") - } - - return strings.TrimSpace(errorMessage[idx+len(errorMessageFind):]), nil -} diff --git a/devspace.yaml b/devspace.yaml index d708bb928..7d977a8bd 100644 --- a/devspace.yaml +++ b/devspace.yaml @@ -20,8 +20,9 @@ deployments: - name: vcluster helm: chart: - name: ./chart + name: ./charts/k3s values: + serviceCIDR: ${SERVICE_CIDR} tolerations: - operator: "Exists" serviceAccount: @@ -29,8 +30,6 @@ deployments: name: default vcluster: image: ${K3S_IMAGE} - extraArgs: - - --service-cidr=${SERVICE_CIDR} rbac: clusterRole: create: true diff --git a/docs/pages/operator/other-distributions.mdx b/docs/pages/operator/other-distributions.mdx index c5233062f..f86f91b03 100644 --- a/docs/pages/operator/other-distributions.mdx +++ b/docs/pages/operator/other-distributions.mdx @@ -5,71 +5,16 @@ sidebar_label: Other k8s Distributions By default, vcluster will use [k3s](https://github.com/k3s-io/k3s) as virtual Kubernetes cluster, which is a highly available, certified Kubernetes distribution designed for production workloads in unattended, resource-constrained, remote locations or inside IoT appliances. -However, vcluster also works with other Kubernetes distributions and k3s is not required. We recommend to use k3s, because it has a small footprint and widely adopted, but if your use case requires a different k8s distribution, you can exchange k3s with another distribution such as k0s or vanilla k8s. 
+However, vcluster also works with other Kubernetes distributions and k3s is not required. We recommend to use k3s, because it has a small footprint and widely adopted, but if your use case requires a different k8s distribution, vcluster also supports k0s or vanilla k8s. ## k0s [k0s](https://github.com/k0sproject/k0s) is an all-inclusive Kubernetes distribution, which is configured with all of the features needed to build a Kubernetes cluster and packaged as a single binary for ease of use. vcluster supports k0s as backing virtual Kubernetes cluster. -In order to use k0s as backing cluster, create a `values.yaml` in the following form: +In order to use k0s as backing cluster, create a vcluster with the following command: ``` -vcluster: - # Replace this with the desired k0s version - image: k0sproject/k0s:v1.22.4-k0s.0 - command: ["k0s"] - baseArgs: - - controller - - --config=/etc/k0s/config.yaml - - --data-dir=/data/k0s - - --disable-components=konnectivity-server,kube-scheduler,csr-approver,default-psp,kube-proxy,coredns,network-provider,helm,metrics-server,kubelet-config - extraArgs: [] - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/k0s - name: k0s-config - -syncer: - extraArgs: - - --request-header-ca-cert=/data/k0s/pki/ca.crt - - --client-ca-cert=/data/k0s/pki/ca.crt - - --server-ca-cert=/data/k0s/pki/ca.crt - - --server-ca-key=/data/k0s/pki/ca.key - - --kube-config=/data/k0s/pki/admin.conf - -volumes: - - name: k0s-config - secret: - secretName: k0s-config - -secret: - name: k0s-config - data: - config.yaml: |- - apiVersion: k0s.k0sproject.io/v1beta1 - kind: Cluster - metadata: - name: k0s - spec: - api: - port: 6443 - k0sApiPort: 9443 - extraArgs: - enable-admission-plugins: NodeRestriction - network: - # Will be replaced automatically from the vcluster cli - serviceCIDR: ##CIDR## - provider: custom - controllerManager: - extraArgs: - controllers: 
'*,-nodeipam,-nodelifecycle,-persistentvolume-binder,-attachdetach,-persistentvolume-expander,-cloud-node-lifecycle' -``` - -Now create the vcluster with: - -``` -vcluster create vcluster-1 -n host-namespace-1 -f values.yaml +vcluster create vcluster-1 -n host-namespace-1 --distro k0s ``` Connect to the vcluster and start using it: @@ -82,7 +27,7 @@ kubectl get ns ... ``` -Congrats, you have deployed vcluster with k0s as virtual Kubernetes cluster distribution! +Behind the scenes a different helm chart will be deployed (`vcluster-k0s`), that holds specific configuration to support k0s. ## Vanilla k8s diff --git a/pkg/certs/cert_list.go b/pkg/certs/cert_list.go new file mode 100644 index 000000000..8c82105d6 --- /dev/null +++ b/pkg/certs/cert_list.go @@ -0,0 +1,419 @@ +/* +Copyright 2018 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certs + +import ( + "crypto" + "crypto/x509" + "github.com/pkg/errors" + + certutil "k8s.io/client-go/util/cert" +) + +type configMutatorsFunc func(*InitConfiguration, *CertConfig) error + +// KubeadmCert represents a certificate that Kubeadm will create to function properly. +type KubeadmCert struct { + Name string + LongName string + BaseName string + CAName string + // Some attributes will depend on the InitConfiguration, only known at runtime. + // These functions will be run in series, passed both the InitConfiguration and a cert Config. 
+ configMutators []configMutatorsFunc + config CertConfig +} + +// GetConfig returns the definition for the given cert given the provided InitConfiguration +func (k *KubeadmCert) GetConfig(ic *InitConfiguration) (*CertConfig, error) { + for _, f := range k.configMutators { + if err := f(ic, &k.config); err != nil { + return nil, err + } + } + + k.config.PublicKeyAlgorithm = ic.ClusterConfiguration.PublicKeyAlgorithm() + return &k.config, nil +} + +// CreateFromCA makes and writes a certificate using the given CA cert and key. +func (k *KubeadmCert) CreateFromCA(ic *InitConfiguration, caCert *x509.Certificate, caKey crypto.Signer) error { + cfg, err := k.GetConfig(ic) + if err != nil { + return errors.Wrapf(err, "couldn't create %q certificate", k.Name) + } + cert, key, err := NewCertAndKey(caCert, caKey, cfg) + if err != nil { + return err + } + err = writeCertificateFilesIfNotExist( + ic.CertificatesDir, + k.BaseName, + caCert, + cert, + key, + cfg, + ) + + if err != nil { + return errors.Wrapf(err, "failed to write or validate certificate %q", k.Name) + } + + return nil +} + +// CreateAsCA creates a certificate authority, writing the files to disk and also returning the created CA so it can be used to sign child certs. 
+func (k *KubeadmCert) CreateAsCA(ic *InitConfiguration) (*x509.Certificate, crypto.Signer, error) { + cfg, err := k.GetConfig(ic) + if err != nil { + return nil, nil, errors.Wrapf(err, "couldn't get configuration for %q CA certificate", k.Name) + } + caCert, caKey, err := NewCertificateAuthority(cfg) + if err != nil { + return nil, nil, errors.Wrapf(err, "couldn't generate %q CA certificate", k.Name) + } + + err = writeCertificateAuthorityFilesIfNotExist( + ic.CertificatesDir, + k.BaseName, + caCert, + caKey, + ) + if err != nil { + return nil, nil, errors.Wrapf(err, "couldn't write out %q CA certificate", k.Name) + } + + return caCert, caKey, nil +} + +// CertificateTree is represents a one-level-deep tree, mapping a CA to the certs that depend on it. +type CertificateTree map[*KubeadmCert]Certificates + +// CreateTree creates the CAs, certs signed by the CAs, and writes them all to disk. +func (t CertificateTree) CreateTree(ic *InitConfiguration) error { + for ca, leaves := range t { + cfg, err := ca.GetConfig(ic) + if err != nil { + return err + } + + var caKey crypto.Signer + + caCert, err := TryLoadCertFromDisk(ic.CertificatesDir, ca.BaseName) + if err == nil { + // Validate period + CheckCertificatePeriodValidity(ca.BaseName, caCert) + + // Cert exists already, make sure it's valid + if !caCert.IsCA { + return errors.Errorf("certificate %q is not a CA", ca.Name) + } + // Try and load a CA Key + caKey, err = TryLoadKeyFromDisk(ic.CertificatesDir, ca.BaseName) + if err != nil { + // If there's no CA key, make sure every certificate exists. + for _, leaf := range leaves { + cl := certKeyLocation{ + pkiDir: ic.CertificatesDir, + baseName: leaf.BaseName, + uxName: leaf.Name, + } + if err := validateSignedCertWithCA(cl, caCert); err != nil { + return errors.Wrapf(err, "could not load expected certificate %q or validate the existence of key %q for it", leaf.Name, ca.Name) + } + } + continue + } + // CA key exists; just use that to create new certificates. 
+ } else { + // CACert doesn't already exist, create a new cert and key. + caCert, caKey, err = NewCertificateAuthority(cfg) + if err != nil { + return err + } + + err = writeCertificateAuthorityFilesIfNotExist( + ic.CertificatesDir, + ca.BaseName, + caCert, + caKey, + ) + if err != nil { + return err + } + } + + for _, leaf := range leaves { + if err := leaf.CreateFromCA(ic, caCert, caKey); err != nil { + return err + } + } + } + return nil +} + +// CertificateMap is a flat map of certificates, keyed by Name. +type CertificateMap map[string]*KubeadmCert + +// CertTree returns a one-level-deep tree, mapping a CA cert to an array of certificates that should be signed by it. +func (m CertificateMap) CertTree() (CertificateTree, error) { + caMap := make(CertificateTree) + + for _, cert := range m { + if cert.CAName == "" { + if _, ok := caMap[cert]; !ok { + caMap[cert] = []*KubeadmCert{} + } + } else { + ca, ok := m[cert.CAName] + if !ok { + return nil, errors.Errorf("certificate %q references unknown CA %q", cert.Name, cert.CAName) + } + caMap[ca] = append(caMap[ca], cert) + } + } + + return caMap, nil +} + +// Certificates is a list of Certificates that Kubeadm should create. +type Certificates []*KubeadmCert + +// AsMap returns the list of certificates as a map, keyed by name. +func (c Certificates) AsMap() CertificateMap { + certMap := make(map[string]*KubeadmCert) + for _, cert := range c { + certMap[cert.Name] = cert + } + + return certMap +} + +// GetDefaultCertList returns all of the certificates kubeadm requires to function. 
+func GetDefaultCertList() Certificates { + return Certificates{ + KubeadmCertRootCA(), + KubeadmCertAPIServer(), + KubeadmCertKubeletClient(), + // Front Proxy certs + KubeadmCertFrontProxyCA(), + KubeadmCertFrontProxyClient(), + // etcd certs + KubeadmCertEtcdCA(), + KubeadmCertEtcdServer(), + KubeadmCertEtcdPeer(), + KubeadmCertEtcdHealthcheck(), + KubeadmCertEtcdAPIClient(), + } +} + +// GetCertsWithoutEtcd returns all of the certificates kubeadm needs when etcd is hosted externally. +func GetCertsWithoutEtcd() Certificates { + return Certificates{ + KubeadmCertRootCA(), + KubeadmCertAPIServer(), + KubeadmCertKubeletClient(), + // Front Proxy certs + KubeadmCertFrontProxyCA(), + KubeadmCertFrontProxyClient(), + } +} + +// KubeadmCertRootCA is the definition of the Kubernetes Root CA for the API Server and kubelet. +func KubeadmCertRootCA() *KubeadmCert { + return &KubeadmCert{ + Name: "ca", + LongName: "self-signed Kubernetes CA to provision identities for other Kubernetes components", + BaseName: CACertAndKeyBaseName, + config: CertConfig{ + Config: certutil.Config{ + CommonName: "kubernetes", + }, + }, + } +} + +// KubeadmCertAPIServer is the definition of the cert used to serve the Kubernetes API. +func KubeadmCertAPIServer() *KubeadmCert { + return &KubeadmCert{ + Name: "apiserver", + LongName: "certificate for serving the Kubernetes API", + BaseName: APIServerCertAndKeyBaseName, + CAName: "ca", + config: CertConfig{ + Config: certutil.Config{ + CommonName: APIServerCertCommonName, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }, + }, + configMutators: []configMutatorsFunc{ + makeAltNamesMutator(GetAPIServerAltNames), + }, + } +} + +// KubeadmCertKubeletClient is the definition of the cert used by the API server to access the kubelet. 
+func KubeadmCertKubeletClient() *KubeadmCert { + return &KubeadmCert{ + Name: "apiserver-kubelet-client", + LongName: "certificate for the API server to connect to kubelet", + BaseName: APIServerKubeletClientCertAndKeyBaseName, + CAName: "ca", + config: CertConfig{ + Config: certutil.Config{ + CommonName: APIServerKubeletClientCertCommonName, + Organization: []string{SystemPrivilegedGroup}, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }, + }, + } +} + +// KubeadmCertFrontProxyCA is the definition of the CA used for the front end proxy. +func KubeadmCertFrontProxyCA() *KubeadmCert { + return &KubeadmCert{ + Name: "front-proxy-ca", + LongName: "self-signed CA to provision identities for front proxy", + BaseName: FrontProxyCACertAndKeyBaseName, + config: CertConfig{ + Config: certutil.Config{ + CommonName: "front-proxy-ca", + }, + }, + } +} + +// KubeadmCertFrontProxyClient is the definition of the cert used by the API server to access the front proxy. +func KubeadmCertFrontProxyClient() *KubeadmCert { + return &KubeadmCert{ + Name: "front-proxy-client", + BaseName: FrontProxyClientCertAndKeyBaseName, + LongName: "certificate for the front proxy client", + CAName: "front-proxy-ca", + config: CertConfig{ + Config: certutil.Config{ + CommonName: FrontProxyClientCertCommonName, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }, + }, + } +} + +// KubeadmCertEtcdCA is the definition of the root CA used by the hosted etcd server. +func KubeadmCertEtcdCA() *KubeadmCert { + return &KubeadmCert{ + Name: "etcd-ca", + LongName: "self-signed CA to provision identities for etcd", + BaseName: EtcdCACertAndKeyBaseName, + config: CertConfig{ + Config: certutil.Config{ + CommonName: "etcd-ca", + }, + }, + } +} + +// KubeadmCertEtcdServer is the definition of the cert used to serve etcd to clients. 
+func KubeadmCertEtcdServer() *KubeadmCert { + return &KubeadmCert{ + Name: "etcd-server", + LongName: "certificate for serving etcd", + BaseName: EtcdServerCertAndKeyBaseName, + CAName: "etcd-ca", + config: CertConfig{ + Config: certutil.Config{ + // TODO: etcd 3.2 introduced an undocumented requirement for ClientAuth usage on the + // server cert: https://github.com/coreos/etcd/issues/9785#issuecomment-396715692 + // Once the upstream issue is resolved, this should be returned to only allowing + // ServerAuth usage. + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + }, + }, + configMutators: []configMutatorsFunc{ + makeAltNamesMutator(GetEtcdAltNames), + setCommonNameToNodeName(), + }, + } +} + +// KubeadmCertEtcdPeer is the definition of the cert used by etcd peers to access each other. +func KubeadmCertEtcdPeer() *KubeadmCert { + return &KubeadmCert{ + Name: "etcd-peer", + LongName: "certificate for etcd nodes to communicate with each other", + BaseName: EtcdPeerCertAndKeyBaseName, + CAName: "etcd-ca", + config: CertConfig{ + Config: certutil.Config{ + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + }, + }, + configMutators: []configMutatorsFunc{ + makeAltNamesMutator(GetEtcdPeerAltNames), + setCommonNameToNodeName(), + }, + } +} + +// KubeadmCertEtcdHealthcheck is the definition of the cert used by Kubernetes to check the health of the etcd server. 
+func KubeadmCertEtcdHealthcheck() *KubeadmCert { + return &KubeadmCert{ + Name: "etcd-healthcheck-client", + LongName: "certificate for liveness probes to healthcheck etcd", + BaseName: EtcdHealthcheckClientCertAndKeyBaseName, + CAName: "etcd-ca", + config: CertConfig{ + Config: certutil.Config{ + CommonName: EtcdHealthcheckClientCertCommonName, + Organization: []string{SystemPrivilegedGroup}, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }, + }, + } +} + +// KubeadmCertEtcdAPIClient is the definition of the cert used by the API server to access etcd. +func KubeadmCertEtcdAPIClient() *KubeadmCert { + return &KubeadmCert{ + Name: "apiserver-etcd-client", + LongName: "certificate the apiserver uses to access etcd", + BaseName: APIServerEtcdClientCertAndKeyBaseName, + CAName: "etcd-ca", + config: CertConfig{ + Config: certutil.Config{ + CommonName: APIServerEtcdClientCertCommonName, + Organization: []string{SystemPrivilegedGroup}, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }, + }, + } +} + +func makeAltNamesMutator(f func(*InitConfiguration) (*certutil.AltNames, error)) configMutatorsFunc { + return func(mc *InitConfiguration, cc *CertConfig) error { + altNames, err := f(mc) + if err != nil { + return err + } + cc.AltNames = *altNames + return nil + } +} + +func setCommonNameToNodeName() configMutatorsFunc { + return func(mc *InitConfiguration, cc *CertConfig) error { + cc.CommonName = mc.NodeRegistration.Name + return nil + } +} diff --git a/pkg/certs/certs.go b/pkg/certs/certs.go new file mode 100644 index 000000000..4d93335d3 --- /dev/null +++ b/pkg/certs/certs.go @@ -0,0 +1,245 @@ +/* +Copyright 2016 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certs + +import ( + "crypto" + "crypto/x509" + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/pkg/errors" + + "k8s.io/client-go/util/keyutil" + "k8s.io/klog/v2" +) + +var ( + // certPeriodValidation is used to store if period validation was done for a certificate + certPeriodValidationMutex sync.Mutex + certPeriodValidation = map[string]struct{}{} +) + +// CreatePKIAssets will create and write to disk all PKI assets necessary to establish the control plane. +// If the PKI assets already exists in the target folder, they are used only if evaluated equal; otherwise an error is returned. +func CreatePKIAssets(cfg *InitConfiguration) error { + klog.V(1).Infoln("creating PKI assets") + + // This structure cannot handle multilevel CA hierarchies. + // This isn't a problem right now, but may become one in the future. + + var certList Certificates + + if cfg.Etcd.Local == nil { + certList = GetCertsWithoutEtcd() + } else { + certList = GetDefaultCertList() + } + + certTree, err := certList.AsMap().CertTree() + if err != nil { + return err + } + + if err := certTree.CreateTree(cfg); err != nil { + return errors.Wrap(err, "error creating PKI assets") + } + + fmt.Printf("[certs] Valid certificates and keys now exist in %q\n", cfg.CertificatesDir) + + // Service accounts are not x509 certs, so handled separately + return CreateServiceAccountKeyAndPublicKeyFiles(cfg.CertificatesDir, cfg.ClusterConfiguration.PublicKeyAlgorithm()) +} + +// CreateServiceAccountKeyAndPublicKeyFiles creates new public/private key files for signing service account users. 
+// If the sa public/private key files already exist in the target folder, they are used only if evaluated equals; otherwise an error is returned. +func CreateServiceAccountKeyAndPublicKeyFiles(certsDir string, keyType x509.PublicKeyAlgorithm) error { + klog.V(1).Infoln("creating new public/private key files for signing service account users") + _, err := keyutil.PrivateKeyFromFile(filepath.Join(certsDir, ServiceAccountPrivateKeyName)) + if err == nil { + // kubeadm doesn't validate the existing certificate key more than this; + // Basically, if we find a key file with the same path kubeadm thinks those files + // are equal and doesn't bother writing a new file + fmt.Printf("[certs] Using the existing %q key\n", ServiceAccountKeyBaseName) + return nil + } else if !os.IsNotExist(err) { + return errors.Wrapf(err, "file %s existed but it could not be loaded properly", ServiceAccountPrivateKeyName) + } + + // The key does NOT exist, let's generate it now + key, err := NewPrivateKey(keyType) + if err != nil { + return err + } + + // Write .key and .pub files to disk + fmt.Printf("[certs] Generating %q key and public key\n", ServiceAccountKeyBaseName) + + if err := WriteKey(certsDir, ServiceAccountKeyBaseName, key); err != nil { + return err + } + + return WritePublicKey(certsDir, ServiceAccountKeyBaseName, key.Public()) +} + +// writeCertificateAuthorityFilesIfNotExist write a new certificate Authority to the given path. +// If there already is a certificate file at the given path; kubeadm tries to load it and check if the values in the +// existing and the expected certificate equals. If they do; kubeadm will just skip writing the file as it's up-to-date, +// otherwise this function returns an error. 
+func writeCertificateAuthorityFilesIfNotExist(pkiDir string, baseName string, caCert *x509.Certificate, caKey crypto.Signer) error { + + // If cert or key exists, we should try to load them + if CertOrKeyExist(pkiDir, baseName) { + + // Try to load .crt and .key from the PKI directory + caCert, _, err := TryLoadCertAndKeyFromDisk(pkiDir, baseName) + if err != nil { + return errors.Wrapf(err, "failure loading %s certificate", baseName) + } + // Validate period + CheckCertificatePeriodValidity(baseName, caCert) + + // Check if the existing cert is a CA + if !caCert.IsCA { + return errors.Errorf("certificate %s is not a CA", baseName) + } + + // kubeadm doesn't validate the existing certificate Authority more than this; + // Basically, if we find a certificate file with the same path; and it is a CA + // kubeadm thinks those files are equal and doesn't bother writing a new file + fmt.Printf("[certs] Using the existing %q certificate and key\n", baseName) + } else { + // Write .crt and .key files to disk + fmt.Printf("[certs] Generating %q certificate and key\n", baseName) + + if err := WriteCertAndKey(pkiDir, baseName, caCert, caKey); err != nil { + return errors.Wrapf(err, "failure while saving %s certificate and key", baseName) + } + } + return nil +} + +// writeCertificateFilesIfNotExist write a new certificate to the given path. +// If there already is a certificate file at the given path; kubeadm tries to load it and check if the values in the +// existing and the expected certificate equals. If they do; kubeadm will just skip writing the file as it's up-to-date, +// otherwise this function returns an error. 
+func writeCertificateFilesIfNotExist(pkiDir string, baseName string, signingCert *x509.Certificate, cert *x509.Certificate, key crypto.Signer, cfg *CertConfig) error { + + // Checks if the signed certificate exists in the PKI directory + if CertOrKeyExist(pkiDir, baseName) { + // Try to load key from the PKI directory + _, err := TryLoadKeyFromDisk(pkiDir, baseName) + if err != nil { + return errors.Wrapf(err, "failure loading %s key", baseName) + } + + // Try to load certificate from the PKI directory + signedCert, intermediates, err := TryLoadCertChainFromDisk(pkiDir, baseName) + if err != nil { + return errors.Wrapf(err, "failure loading %s certificate", baseName) + } + // Validate period + CheckCertificatePeriodValidity(baseName, signedCert) + + // Check if the existing cert is signed by the given CA + if err := VerifyCertChain(signedCert, intermediates, signingCert); err != nil { + return errors.Errorf("certificate %s is not signed by corresponding CA", baseName) + } + + // Check if the certificate has the correct attributes + if err := validateCertificateWithConfig(signedCert, baseName, cfg); err != nil { + return err + } + + fmt.Printf("[certs] Using the existing %q certificate and key\n", baseName) + } else { + // Write .crt and .key files to disk + fmt.Printf("[certs] Generating %q certificate and key\n", baseName) + + if err := WriteCertAndKey(pkiDir, baseName, cert, key); err != nil { + return errors.Wrapf(err, "failure while saving %s certificate and key", baseName) + } + if HasServerAuth(cert) { + fmt.Printf("[certs] %s serving cert is signed for DNS names %v and IPs %v\n", baseName, cert.DNSNames, cert.IPAddresses) + } + } + + return nil +} + +type certKeyLocation struct { + pkiDir string + caBaseName string + baseName string + uxName string +} + +// validateSignedCertWithCA tries to load a certificate and private key and +// validates that the cert is signed by the given caCert +func validateSignedCertWithCA(l certKeyLocation, caCert 
*x509.Certificate) error { + // Try to load key from the PKI directory + _, err := TryLoadKeyFromDisk(l.pkiDir, l.baseName) + if err != nil { + return errors.Wrapf(err, "failure loading key for %s", l.baseName) + } + + // Try to load certificate from the PKI directory + signedCert, intermediates, err := TryLoadCertChainFromDisk(l.pkiDir, l.baseName) + if err != nil { + return errors.Wrapf(err, "failure loading certificate for %s", l.uxName) + } + // Validate period + CheckCertificatePeriodValidity(l.uxName, signedCert) + + // Check if the cert is signed by the CA + if err := VerifyCertChain(signedCert, intermediates, caCert); err != nil { + return errors.Wrapf(err, "certificate %s is not signed by corresponding CA", l.uxName) + } + return nil +} + +// validateCertificateWithConfig makes sure that a given certificate is valid at +// least for the SANs defined in the configuration. +func validateCertificateWithConfig(cert *x509.Certificate, baseName string, cfg *CertConfig) error { + for _, dnsName := range cfg.AltNames.DNSNames { + if err := cert.VerifyHostname(dnsName); err != nil { + return errors.Wrapf(err, "certificate %s is invalid", baseName) + } + } + for _, ipAddress := range cfg.AltNames.IPs { + if err := cert.VerifyHostname(ipAddress.String()); err != nil { + return errors.Wrapf(err, "certificate %s is invalid", baseName) + } + } + return nil +} + +// CheckCertificatePeriodValidity takes a certificate and prints a warning if its period +// is not valid related to the current time. It does so only if the certificate was not validated already +// by keeping track with a cache. 
+func CheckCertificatePeriodValidity(baseName string, cert *x509.Certificate) { + certPeriodValidationMutex.Lock() + defer certPeriodValidationMutex.Unlock() + if _, exists := certPeriodValidation[baseName]; exists { + return + } + certPeriodValidation[baseName] = struct{}{} + + klog.V(5).Infof("validating certificate period for %s certificate", baseName) + if err := ValidateCertPeriod(cert, 0); err != nil { + klog.Warningf("WARNING: could not validate bounds for certificate %s: %v", baseName, err) + } +} diff --git a/pkg/certs/constants.go b/pkg/certs/constants.go new file mode 100644 index 000000000..6b69677e7 --- /dev/null +++ b/pkg/certs/constants.go @@ -0,0 +1,103 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package certs + +import ( + "time" +) + +const ( + // CertificateValidity defines the validity for all the signed certificates generated by kubeadm + CertificateValidity = time.Hour * 24 * 365 + + // CACertAndKeyBaseName defines certificate authority base name + CACertAndKeyBaseName = "ca" + // CAKeyName defines certificate name + CAKeyName = "ca.key" + + // APIServerCertAndKeyBaseName defines API's server certificate and key base name + APIServerCertAndKeyBaseName = "apiserver" + // APIServerCertName defines API's server certificate name + APIServerCertName = "apiserver.crt" + // APIServerCertCommonName defines API's server certificate common name (CN) + APIServerCertCommonName = "kube-apiserver" + + // APIServerKubeletClientCertAndKeyBaseName defines kubelet client certificate and key base name + APIServerKubeletClientCertAndKeyBaseName = "apiserver-kubelet-client" + // APIServerKubeletClientCertCommonName defines kubelet client certificate common name (CN) + APIServerKubeletClientCertCommonName = "kube-apiserver-kubelet-client" + + // EtcdCACertAndKeyBaseName defines etcd's CA certificate and key base name + EtcdCACertAndKeyBaseName = "etcd/ca" + + // EtcdServerCertAndKeyBaseName defines etcd's server certificate and key base name + EtcdServerCertAndKeyBaseName = "etcd/server" + // EtcdServerCertName defines etcd's server certificate name + EtcdServerCertName = "etcd/server.crt" + + // EtcdPeerCertAndKeyBaseName defines etcd's peer certificate and key base name + EtcdPeerCertAndKeyBaseName = "etcd/peer" + // EtcdPeerCertName defines etcd's peer certificate name + EtcdPeerCertName = "etcd/peer.crt" + + // EtcdHealthcheckClientCertAndKeyBaseName defines etcd's healthcheck client certificate and key base name + EtcdHealthcheckClientCertAndKeyBaseName = "etcd/healthcheck-client" + // EtcdHealthcheckClientCertCommonName defines etcd's healthcheck client certificate common name (CN) + EtcdHealthcheckClientCertCommonName = "kube-etcd-healthcheck-client" + + // 
APIServerEtcdClientCertAndKeyBaseName defines apiserver's etcd client certificate and key base name + APIServerEtcdClientCertAndKeyBaseName = "apiserver-etcd-client" + // APIServerEtcdClientCertCommonName defines apiserver's etcd client certificate common name (CN) + APIServerEtcdClientCertCommonName = "kube-apiserver-etcd-client" + + // ServiceAccountKeyBaseName defines SA key base name + ServiceAccountKeyBaseName = "sa" + // ServiceAccountPrivateKeyName defines SA private key base name + ServiceAccountPrivateKeyName = "sa.key" + + // FrontProxyCACertAndKeyBaseName defines front proxy CA certificate and key base name + FrontProxyCACertAndKeyBaseName = "front-proxy-ca" + // FrontProxyClientCertAndKeyBaseName defines front proxy certificate and key base name + FrontProxyClientCertAndKeyBaseName = "front-proxy-client" + // FrontProxyClientCertCommonName defines front proxy certificate common name + FrontProxyClientCertCommonName = "front-proxy-client" //used as subject.commonname attribute (CN) + + // AdminKubeConfigFileName defines name for the kubeconfig aimed to be used by the superuser/admin of the cluster + AdminKubeConfigFileName = "admin.conf" + + // KubeletKubeConfigFileName defines the file name for the kubeconfig that the control-plane kubelet will use for talking + // to the API server + KubeletKubeConfigFileName = "kubelet.conf" + // ControllerManagerKubeConfigFileName defines the file name for the controller manager's kubeconfig file + ControllerManagerKubeConfigFileName = "controller-manager.conf" + // SchedulerKubeConfigFileName defines the file name for the scheduler's kubeconfig file + SchedulerKubeConfigFileName = "scheduler.conf" + + // Some well-known users and groups in the core Kubernetes authorization system + + // ControllerManagerUser defines the well-known user the controller-manager should be authenticated as + ControllerManagerUser = "system:kube-controller-manager" + // SchedulerUser defines the well-known user the scheduler should be 
authenticated as + SchedulerUser = "system:kube-scheduler" + // SystemPrivilegedGroup defines the well-known group for the apiservers. This group is also superuser by default + // (i.e. bound to the cluster-admin ClusterRole) + SystemPrivilegedGroup = "system:masters" + // NodesGroup defines the well-known group for all nodes. + NodesGroup = "system:nodes" + // NodesUserPrefix defines the user name prefix as requested by the Node authorizer. + NodesUserPrefix = "system:node:" + + // DefaultAPIServerBindAddress is the default bind address for the API Server + DefaultAPIServerBindAddress = "0.0.0.0" +) diff --git a/pkg/certs/init.go b/pkg/certs/init.go new file mode 100644 index 000000000..2a9a43d2d --- /dev/null +++ b/pkg/certs/init.go @@ -0,0 +1,126 @@ +/* +Copyright 2017 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package certs + +import ( + "net" + "reflect" + "strconv" + "strings" + + "github.com/pkg/errors" + netutil "k8s.io/apimachinery/pkg/util/net" + "k8s.io/klog/v2" +) + +// SetInitDynamicDefaults checks and sets configuration values for the InitConfiguration object +func SetInitDynamicDefaults() (*InitConfiguration, error) { + cfg := &InitConfiguration{} + + if err := SetAPIEndpointDynamicDefaults(&cfg.LocalAPIEndpoint); err != nil { + return nil, err + } + err := SetClusterDynamicDefaults(&cfg.ClusterConfiguration, &cfg.LocalAPIEndpoint, &cfg.NodeRegistration) + if err != nil { + return nil, err + } + + return cfg, nil +} + +// SetAPIEndpointDynamicDefaults checks and sets configuration values for the APIEndpoint object +func SetAPIEndpointDynamicDefaults(cfg *APIEndpoint) error { + // validate cfg.API.AdvertiseAddress. + addressIP := net.ParseIP(cfg.AdvertiseAddress) + if addressIP == nil && cfg.AdvertiseAddress != "" { + return errors.Errorf("couldn't use \"%s\" as \"apiserver-advertise-address\", must be ipv4 or ipv6 address", cfg.AdvertiseAddress) + } + + // kubeadm allows users to specify address=Loopback as a selector for global unicast IP address that can be found on loopback interface. + // e.g. This is required for network setups where default routes are present, but network interfaces use only link-local addresses (e.g. as described in RFC5549). + if addressIP.IsLoopback() { + loopbackIP, err := netutil.ChooseBindAddressForInterface(netutil.LoopbackInterfaceName) + if err != nil { + return err + } + if loopbackIP != nil { + klog.V(4).Infof("Found active IP %v on loopback interface", loopbackIP.String()) + cfg.AdvertiseAddress = loopbackIP.String() + return nil + } + return errors.New("unable to resolve link-local addresses") + } + + // This is the same logic as the API Server uses, except that if no interface is found the address is set to 0.0.0.0, which is invalid and cannot be used + // for bootstrapping a cluster. 
+ ip, err := ChooseAPIServerBindAddress(addressIP) + if err != nil { + return err + } + cfg.AdvertiseAddress = ip.String() + + return nil +} + +// ChooseAPIServerBindAddress is a wrapper for netutil.ResolveBindAddress that also handles +// the case where no default routes were found and an IP for the API server could not be obtained. +func ChooseAPIServerBindAddress(bindAddress net.IP) (net.IP, error) { + ip, err := netutil.ResolveBindAddress(bindAddress) + if err != nil { + if netutil.IsNoRoutesError(err) { + klog.Warningf("WARNING: could not obtain a bind address for the API Server: %v; using: %s", err, DefaultAPIServerBindAddress) + defaultIP := net.ParseIP(DefaultAPIServerBindAddress) + if defaultIP == nil { + return nil, errors.Errorf("cannot parse default IP address: %s", DefaultAPIServerBindAddress) + } + return defaultIP, nil + } + return nil, err + } + if bindAddress != nil && !bindAddress.IsUnspecified() && !reflect.DeepEqual(ip, bindAddress) { + klog.Warningf("WARNING: overriding requested API server bind address: requested %q, actual %q", bindAddress, ip) + } + return ip, nil +} + +// SetClusterDynamicDefaults checks and sets values for the ClusterConfiguration object +func SetClusterDynamicDefaults(cfg *ClusterConfiguration, localAPIEndpoint *APIEndpoint, nodeRegOpts *NodeRegistrationOptions) error { + // If ControlPlaneEndpoint is specified without a port number defaults it to + // the bindPort number of the APIEndpoint. + // This will allow join of additional control plane instances with different bindPort number + if cfg.ControlPlaneEndpoint != "" { + host, port, err := ParseHostPort(cfg.ControlPlaneEndpoint) + if err != nil { + return err + } + if port == "" { + cfg.ControlPlaneEndpoint = net.JoinHostPort(host, strconv.FormatInt(int64(localAPIEndpoint.BindPort), 10)) + } + } + + // Downcase SANs. Some domain names (like ELBs) have capitals in them. 
+ LowercaseSANs(cfg.APIServer.CertSANs) + return nil +} + +// LowercaseSANs can be used to force all SANs to be lowercase so it passes IsDNS1123Subdomain +func LowercaseSANs(sans []string) { + for i, san := range sans { + lowercase := strings.ToLower(san) + if lowercase != san { + klog.V(1).Infof("lowercasing SAN %q to %q", san, lowercase) + sans[i] = lowercase + } + } +} diff --git a/pkg/certs/kubeconfig.go b/pkg/certs/kubeconfig.go new file mode 100644 index 000000000..578c5f8e3 --- /dev/null +++ b/pkg/certs/kubeconfig.go @@ -0,0 +1,293 @@ +/* +Copyright 2018 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package certs + +import ( + "bytes" + "crypto" + "crypto/x509" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/pkg/errors" + + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + certutil "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/keyutil" + "k8s.io/klog/v2" +) + +// clientCertAuth struct holds info required to build a client certificate to provide authentication info in a kubeconfig object +type clientCertAuth struct { + CAKey crypto.Signer + Organizations []string +} + +// tokenAuth struct holds info required to use a token to provide authentication info in a kubeconfig object +type tokenAuth struct { + Token string `datapolicy:"token"` +} + +// kubeConfigSpec struct holds info required to build a KubeConfig object +type kubeConfigSpec struct { + CACert *x509.Certificate + APIServer string + ClientName string + TokenAuth *tokenAuth `datapolicy:"token"` + ClientCertAuth *clientCertAuth `datapolicy:"security-key"` +} + +// CreateJoinControlPlaneKubeConfigFiles will create and write to disk the kubeconfig files required by kubeadm +// join --control-plane workflow, plus the admin kubeconfig file used by the administrator and kubeadm itself; the +// kubelet.conf file must not be created because it will be created and signed by the kubelet TLS bootstrap process. +// When not using external CA mode, if a kubeconfig file already exists it is used only if evaluated equal, +// otherwise an error is returned. For external CA mode, the creation of kubeconfig files is skipped. 
+func CreateJoinControlPlaneKubeConfigFiles(outDir string, cfg *InitConfiguration) error { + var externaCA bool + caKeyPath := filepath.Join(cfg.CertificatesDir, CAKeyName) + if _, err := os.Stat(caKeyPath); os.IsNotExist(err) { + externaCA = true + } + + files := []string{ + AdminKubeConfigFileName, + ControllerManagerKubeConfigFileName, + SchedulerKubeConfigFileName, + } + + for _, file := range files { + if externaCA { + fmt.Printf("[kubeconfig] External CA mode: Using user provided %s\n", file) + continue + } + if err := createKubeConfigFiles(outDir, cfg, file); err != nil { + return err + } + } + return nil +} + +// createKubeConfigFiles creates all the requested kubeconfig files. +// If kubeconfig files already exists, they are used only if evaluated equal; otherwise an error is returned. +func createKubeConfigFiles(outDir string, cfg *InitConfiguration, kubeConfigFileNames ...string) error { + + // gets the KubeConfigSpecs, actualized for the current InitConfiguration + specs, err := getKubeConfigSpecs(cfg) + if err != nil { + return err + } + + for _, kubeConfigFileName := range kubeConfigFileNames { + // retrieves the KubeConfigSpec for given kubeConfigFileName + spec, exists := specs[kubeConfigFileName] + if !exists { + return errors.Errorf("couldn't retrieve KubeConfigSpec for %s", kubeConfigFileName) + } + + // builds the KubeConfig object + config, err := buildKubeConfigFromSpec(spec, cfg.ClusterName, nil) + if err != nil { + return err + } + + // writes the kubeconfig to disk if it does not exist + if err = createKubeConfigFileIfNotExists(outDir, kubeConfigFileName, config); err != nil { + return err + } + } + + return nil +} + +// getKubeConfigSpecs returns all KubeConfigSpecs actualized to the context of the current InitConfiguration +// NB. this method holds the information about how kubeadm creates kubeconfig files. 
func getKubeConfigSpecs(cfg *InitConfiguration) (map[string]*kubeConfigSpec, error) {
	caCert, caKey, err := TryLoadCertAndKeyFromDisk(cfg.CertificatesDir, CACertAndKeyBaseName)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create a kubeconfig; the CA files couldn't be loaded")
	}
	// Validate period
	CheckCertificatePeriodValidity(CACertAndKeyBaseName, caCert)

	configs, err := getKubeConfigSpecsBase(cfg)
	if err != nil {
		return nil, err
	}
	// every kubeconfig spec shares the same CA certificate and signing key
	for _, spec := range configs {
		spec.CACert = caCert
		spec.ClientCertAuth.CAKey = caKey
	}
	return configs, nil
}

// buildKubeConfigFromSpec creates a kubeconfig object for the given kubeConfigSpec
func buildKubeConfigFromSpec(spec *kubeConfigSpec, clustername string, notAfter *time.Time) (*clientcmdapi.Config, error) {

	// If this kubeconfig should use token
	if spec.TokenAuth != nil {
		// create a kubeconfig with a token
		return CreateWithToken(
			spec.APIServer,
			clustername,
			spec.ClientName,
			EncodeCertPEM(spec.CACert),
			spec.TokenAuth.Token,
		), nil
	}

	// otherwise, create a client cert signed by the cluster CA
	clientCertConfig := newClientCertConfigFromKubeConfigSpec(spec, notAfter)

	clientCert, clientKey, err := NewCertAndKey(spec.CACert, spec.ClientCertAuth.CAKey, &clientCertConfig)
	if err != nil {
		return nil, errors.Wrapf(err, "failure while creating %s client certificate", spec.ClientName)
	}

	encodedClientKey, err := keyutil.MarshalPrivateKeyToPEM(clientKey)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to marshal private key to PEM")
	}
	// create a kubeconfig with the client certs
	return CreateWithCerts(
		spec.APIServer,
		clustername,
		spec.ClientName,
		EncodeCertPEM(spec.CACert),
		encodedClientKey,
		EncodeCertPEM(clientCert),
	), nil
}

// newClientCertConfigFromKubeConfigSpec builds the certificate config (CN, organizations,
// client-auth usage, optional expiry) for the client certificate described by the spec.
func newClientCertConfigFromKubeConfigSpec(spec *kubeConfigSpec, notAfter *time.Time) CertConfig {
	return CertConfig{
		Config: certutil.Config{
			CommonName:   spec.ClientName,
			Organization: spec.ClientCertAuth.Organizations,
			Usages:       []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		},
		NotAfter: notAfter,
	}
}

// validateKubeConfig checks if the kubeconfig file exists and has the expected CA and server URL
func validateKubeConfig(outDir, filename string, config *clientcmdapi.Config) error {
	kubeConfigFilePath := filepath.Join(outDir, filename)

	if _, err := os.Stat(kubeConfigFilePath); err != nil {
		return err
	}

	// The kubeconfig already exists, let's check if it has got the same CA and server URL
	currentConfig, err := clientcmd.LoadFromFile(kubeConfigFilePath)
	if err != nil {
		return errors.Wrapf(err, "failed to load kubeconfig file %s that already exists on disk", kubeConfigFilePath)
	}

	expectedCtx, exists := config.Contexts[config.CurrentContext]
	if !exists {
		return errors.Errorf("failed to find expected context %s", config.CurrentContext)
	}
	expectedCluster := expectedCtx.Cluster
	currentCtx, exists := currentConfig.Contexts[currentConfig.CurrentContext]
	if !exists {
		return errors.Errorf("failed to find CurrentContext in Contexts of the kubeconfig file %s", kubeConfigFilePath)
	}
	currentCluster := currentCtx.Cluster
	if currentConfig.Clusters[currentCluster] == nil {
		return errors.Errorf("failed to find the given CurrentContext Cluster in Clusters of the kubeconfig file %s", kubeConfigFilePath)
	}

	// Make sure the compared CAs are whitespace-trimmed. The function clientcmd.LoadFromFile() just decodes
	// the base64 CA and places it raw in the v1.Config object. In case the user has extra whitespace
	// in the CA they used to create a kubeconfig this comparison to a generated v1.Config will otherwise fail.
	caCurrent := bytes.TrimSpace(currentConfig.Clusters[currentCluster].CertificateAuthorityData)
	caExpected := bytes.TrimSpace(config.Clusters[expectedCluster].CertificateAuthorityData)

	// If the current CA cert on disk doesn't match the expected CA cert, error out because we have a file, but it's stale
	if !bytes.Equal(caCurrent, caExpected) {
		return errors.Errorf("a kubeconfig file %q exists already but has got the wrong CA cert", kubeConfigFilePath)
	}
	// If the current API Server location on disk doesn't match the expected API server, show a warning
	if currentConfig.Clusters[currentCluster].Server != config.Clusters[expectedCluster].Server {
		klog.Warningf("a kubeconfig file %q exists already but has an unexpected API Server URL: expected: %s, got: %s",
			kubeConfigFilePath, config.Clusters[expectedCluster].Server, currentConfig.Clusters[currentCluster].Server)
	}

	return nil
}

// createKubeConfigFileIfNotExists saves the KubeConfig object into a file if there isn't any file at the given path.
// If there already is a kubeconfig file at the given path; kubeadm tries to load it and check if the values in the
// existing and the expected config equals. If they do; kubeadm will just skip writing the file as it's up-to-date,
// but if a file exists but has old content or isn't a kubeconfig file, this function returns an error.
+func createKubeConfigFileIfNotExists(outDir, filename string, config *clientcmdapi.Config) error { + kubeConfigFilePath := filepath.Join(outDir, filename) + + err := validateKubeConfig(outDir, filename, config) + if err != nil { + // Check if the file exist, and if it doesn't, just write it to disk + if !os.IsNotExist(err) { + return err + } + fmt.Printf("[kubeconfig] Writing %q kubeconfig file\n", filename) + err = WriteToDisk(kubeConfigFilePath, config) + if err != nil { + return errors.Wrapf(err, "failed to save kubeconfig file %q on disk", kubeConfigFilePath) + } + return nil + } + // kubeadm doesn't validate the existing kubeconfig file more than this (kubeadm trusts the client certs to be valid) + // Basically, if we find a kubeconfig file with the same path; the same CA cert and the same server URL; + // kubeadm thinks those files are equal and doesn't bother writing a new file + fmt.Printf("[kubeconfig] Using existing kubeconfig file: %q\n", kubeConfigFilePath) + + return nil +} + +func getKubeConfigSpecsBase(cfg *InitConfiguration) (map[string]*kubeConfigSpec, error) { + controlPlaneEndpoint, err := GetControlPlaneEndpoint(cfg.ControlPlaneEndpoint, &cfg.LocalAPIEndpoint) + if err != nil { + return nil, err + } + + return map[string]*kubeConfigSpec{ + AdminKubeConfigFileName: { + APIServer: controlPlaneEndpoint, + ClientName: "kubernetes-admin", + ClientCertAuth: &clientCertAuth{ + Organizations: []string{SystemPrivilegedGroup}, + }, + }, + KubeletKubeConfigFileName: { + APIServer: controlPlaneEndpoint, + ClientName: fmt.Sprintf("%s%s", NodesUserPrefix, cfg.NodeRegistration.Name), + ClientCertAuth: &clientCertAuth{ + Organizations: []string{NodesGroup}, + }, + }, + ControllerManagerKubeConfigFileName: { + APIServer: controlPlaneEndpoint, + ClientName: ControllerManagerUser, + ClientCertAuth: &clientCertAuth{}, + }, + SchedulerKubeConfigFileName: { + APIServer: controlPlaneEndpoint, + ClientName: SchedulerUser, + ClientCertAuth: &clientCertAuth{}, + }, + }, 
nil +} diff --git a/pkg/certs/kubeconfig_util.go b/pkg/certs/kubeconfig_util.go new file mode 100644 index 000000000..f581c3277 --- /dev/null +++ b/pkg/certs/kubeconfig_util.go @@ -0,0 +1,72 @@ +/* +Copyright 2017 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certs + +import ( + "fmt" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// CreateBasic creates a basic, general KubeConfig object that then can be extended +func CreateBasic(serverURL, clusterName, userName string, caCert []byte) *clientcmdapi.Config { + // Use the cluster and the username as the context name + contextName := fmt.Sprintf("%s@%s", userName, clusterName) + + return &clientcmdapi.Config{ + Clusters: map[string]*clientcmdapi.Cluster{ + clusterName: { + Server: serverURL, + CertificateAuthorityData: caCert, + }, + }, + Contexts: map[string]*clientcmdapi.Context{ + contextName: { + Cluster: clusterName, + AuthInfo: userName, + }, + }, + AuthInfos: map[string]*clientcmdapi.AuthInfo{}, + CurrentContext: contextName, + } +} + +// CreateWithCerts creates a KubeConfig object with access to the API server with client certificates +func CreateWithCerts(serverURL, clusterName, userName string, caCert []byte, clientKey []byte, clientCert []byte) *clientcmdapi.Config { + config := CreateBasic(serverURL, clusterName, userName, caCert) + config.AuthInfos[userName] = &clientcmdapi.AuthInfo{ + ClientKeyData: clientKey, + ClientCertificateData: 
clientCert, + } + return config +} + +// CreateWithToken creates a KubeConfig object with access to the API server with a token +func CreateWithToken(serverURL, clusterName, userName string, caCert []byte, token string) *clientcmdapi.Config { + config := CreateBasic(serverURL, clusterName, userName, caCert) + config.AuthInfos[userName] = &clientcmdapi.AuthInfo{ + Token: token, + } + return config +} + +// WriteToDisk writes a KubeConfig object down to disk with mode 0600 +func WriteToDisk(filename string, kubeconfig *clientcmdapi.Config) error { + err := clientcmd.WriteToFile(*kubeconfig, filename) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/certs/types.go b/pkg/certs/types.go new file mode 100644 index 000000000..042f21fea --- /dev/null +++ b/pkg/certs/types.go @@ -0,0 +1,174 @@ +/* +Copyright 2016 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certs + +import ( + "crypto/x509" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InitConfiguration contains a list of fields that are specifically "kubeadm init"-only runtime +// information. The cluster-wide config is stored in ClusterConfiguration. The InitConfiguration +// object IS NOT uploaded to the kubeadm-config ConfigMap in the cluster, only the +// ClusterConfiguration is. 
+type InitConfiguration struct { + metav1.TypeMeta + + ClusterName string + + // ClusterConfiguration holds the cluster-wide information, and embeds that struct (which can be (un)marshalled separately as well) + // When InitConfiguration is marshalled to bytes in the external version, this information IS NOT preserved (which can be seen from + // the `json:"-"` tag in the external variant of these API types. + ClusterConfiguration `json:"-"` + + // NodeRegistration holds fields that relate to registering the new control-plane node to the cluster + NodeRegistration NodeRegistrationOptions + + // LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node + // In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint + // is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This + // configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible + // on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process + // fails you may set the desired value here. + LocalAPIEndpoint APIEndpoint + + // CertificateKey sets the key with which certificates and keys are encrypted prior to being uploaded in + // a secret in the cluster during the uploadcerts init phase. + CertificateKey string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterConfiguration contains cluster-wide configuration for a kubeadm cluster +type ClusterConfiguration struct { + // Etcd holds configuration for etcd. + Etcd Etcd + + // Networking holds configuration for the networking topology of the cluster. + Networking Networking + + // ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it + // can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. 
+ // In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort + // are used; in case the ControlPlaneEndpoint is specified but without a TCP port, + // the BindPort is used. + // Possible usages are: + // e.g. In a cluster with more than one control plane instances, this field should be + // assigned the address of the external load balancer in front of the + // control plane instances. + // e.g. in environments with enforced node recycling, the ControlPlaneEndpoint + // could be used for assigning a stable DNS to the control plane. + ControlPlaneEndpoint string + + // APIServer contains extra settings for the API server control plane component + APIServer APIServer + + // CertificatesDir specifies where to store or look for all required certificates. + CertificatesDir string +} + +// APIServer holds settings necessary for API server deployments in the cluster +type APIServer struct { + // CertSANs sets extra Subject Alternative Names for the API Server signing cert. + CertSANs []string + + // TimeoutForControlPlane controls the timeout that we use for API server to appear + TimeoutForControlPlane *metav1.Duration +} + +// APIEndpoint struct contains elements of API server instance deployed on a node. +type APIEndpoint struct { + // AdvertiseAddress sets the IP address for the API server to advertise. + AdvertiseAddress string + + // BindPort sets the secure port for the API Server to bind to. + // Defaults to 6443. + BindPort int32 +} + +// NodeRegistrationOptions holds fields that relate to registering a new control-plane or node to the cluster, either via "kubeadm init" or "kubeadm join" +type NodeRegistrationOptions struct { + + // Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + // This field is also used in the CommonName field of the kubelet's client certificate to the API server. + // Defaults to the hostname of the node if not provided. 
+ Name string +} + +// Networking contains elements describing cluster's networking configuration. +type Networking struct { + // ServiceSubnet is the subnet used by k8s services. Defaults to "10.96.0.0/12". + ServiceSubnet string + // DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local". + DNSDomain string +} + +// Etcd contains elements describing Etcd configuration. +type Etcd struct { + + // Local provides configuration knobs for configuring the local etcd instance + // Local and External are mutually exclusive + Local *LocalEtcd + + // External describes how to connect to an external etcd cluster + // Local and External are mutually exclusive + External *ExternalEtcd +} + +// LocalEtcd describes that kubeadm should run an etcd cluster locally +type LocalEtcd struct { + // ServerCertSANs sets extra Subject Alternative Names for the etcd server signing cert. + ServerCertSANs []string + // PeerCertSANs sets extra Subject Alternative Names for the etcd peer signing cert. + PeerCertSANs []string +} + +// ExternalEtcd describes an external etcd cluster +type ExternalEtcd struct { + + // Endpoints of etcd members. Useful for using external etcd. + // If not provided, kubeadm will run etcd in a static pod. + Endpoints []string + // CAFile is an SSL Certificate Authority file used to secure etcd communication. + CAFile string + // CertFile is an SSL certification file used to secure etcd communication. + CertFile string + // KeyFile is an SSL key file used to secure etcd communication. + KeyFile string +} + +// PublicKeyAlgorithm returns the type of encryption keys used in the cluster. +func (cfg *ClusterConfiguration) PublicKeyAlgorithm() x509.PublicKeyAlgorithm { + return x509.RSA +} + +// Patches contains options related to applying patches to components deployed by kubeadm. +type Patches struct { + // Directory is a path to a directory that contains files named "target[suffix][+patchtype].extension". 
+ // For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of + // "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one + // of "strategic" "merge" or "json" and they match the patch formats supported by kubectl. + // The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". + // "suffix" is an optional string that can be used to determine which patches are applied + // first alpha-numerically. + Directory string +} + +// DocumentMap is a convenient way to describe a map between a YAML document and its GVK type +// +k8s:deepcopy-gen=false +type DocumentMap map[schema.GroupVersionKind][]byte diff --git a/pkg/certs/util.go b/pkg/certs/util.go new file mode 100644 index 000000000..154d49877 --- /dev/null +++ b/pkg/certs/util.go @@ -0,0 +1,656 @@ +/* +Copyright 2016 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package certs + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math" + "math/big" + "net" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + certutil "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/keyutil" + netutils "k8s.io/utils/net" +) + +const ( + // PublicKeyBlockType is a possible value for pem.Block.Type. + PublicKeyBlockType = "PUBLIC KEY" + // CertificateBlockType is a possible value for pem.Block.Type. + CertificateBlockType = "CERTIFICATE" + rsaKeySize = 2048 +) + +// CertConfig is a wrapper around certutil.Config extending it with PublicKeyAlgorithm. +type CertConfig struct { + certutil.Config + NotAfter *time.Time + PublicKeyAlgorithm x509.PublicKeyAlgorithm +} + +// NewCertificateAuthority creates new certificate and private key for the certificate authority +func NewCertificateAuthority(config *CertConfig) (*x509.Certificate, crypto.Signer, error) { + key, err := NewPrivateKey(config.PublicKeyAlgorithm) + if err != nil { + return nil, nil, errors.Wrap(err, "unable to create private key while generating CA certificate") + } + + cert, err := certutil.NewSelfSignedCACert(config.Config, key) + if err != nil { + return nil, nil, errors.Wrap(err, "unable to create self-signed CA certificate") + } + + return cert, key, nil +} + +// NewCertAndKey creates new certificate and key by passing the certificate authority certificate and key +func NewCertAndKey(caCert *x509.Certificate, caKey crypto.Signer, config *CertConfig) (*x509.Certificate, crypto.Signer, error) { + if len(config.Usages) == 0 { + return nil, nil, errors.New("must specify at least one ExtKeyUsage") + } + + key, err := NewPrivateKey(config.PublicKeyAlgorithm) + if err != nil { + return nil, nil, errors.Wrap(err, "unable to create 
private key") + } + + cert, err := NewSignedCert(config, key, caCert, caKey, false) + if err != nil { + return nil, nil, errors.Wrap(err, "unable to sign certificate") + } + + return cert, key, nil +} + +// HasServerAuth returns true if the given certificate is a ServerAuth +func HasServerAuth(cert *x509.Certificate) bool { + for i := range cert.ExtKeyUsage { + if cert.ExtKeyUsage[i] == x509.ExtKeyUsageServerAuth { + return true + } + } + return false +} + +// WriteCertAndKey stores certificate and key at the specified location +func WriteCertAndKey(pkiPath string, name string, cert *x509.Certificate, key crypto.Signer) error { + if err := WriteKey(pkiPath, name, key); err != nil { + return errors.Wrap(err, "couldn't write key") + } + + return WriteCert(pkiPath, name, cert) +} + +// WriteCert stores the given certificate at the given location +func WriteCert(pkiPath, name string, cert *x509.Certificate) error { + if cert == nil { + return errors.New("certificate cannot be nil when writing to file") + } + + certificatePath := pathForCert(pkiPath, name) + if err := certutil.WriteCert(certificatePath, EncodeCertPEM(cert)); err != nil { + return errors.Wrapf(err, "unable to write certificate to file %s", certificatePath) + } + + return nil +} + +// WriteKey stores the given key at the given location +func WriteKey(pkiPath, name string, key crypto.Signer) error { + if key == nil { + return errors.New("private key cannot be nil when writing to file") + } + + privateKeyPath := pathForKey(pkiPath, name) + encoded, err := keyutil.MarshalPrivateKeyToPEM(key) + if err != nil { + return errors.Wrapf(err, "unable to marshal private key to PEM") + } + if err := keyutil.WriteKey(privateKeyPath, encoded); err != nil { + return errors.Wrapf(err, "unable to write private key to file %s", privateKeyPath) + } + + return nil +} + +// WritePublicKey stores the given public key at the given location +func WritePublicKey(pkiPath, name string, key crypto.PublicKey) error { + if key == nil 
{ + return errors.New("public key cannot be nil when writing to file") + } + + publicKeyBytes, err := EncodePublicKeyPEM(key) + if err != nil { + return err + } + publicKeyPath := pathForPublicKey(pkiPath, name) + if err := keyutil.WriteKey(publicKeyPath, publicKeyBytes); err != nil { + return errors.Wrapf(err, "unable to write public key to file %s", publicKeyPath) + } + + return nil +} + +// CertOrKeyExist returns a boolean whether the cert or the key exists +func CertOrKeyExist(pkiPath, name string) bool { + certificatePath, privateKeyPath := PathsForCertAndKey(pkiPath, name) + + _, certErr := os.Stat(certificatePath) + _, keyErr := os.Stat(privateKeyPath) + if os.IsNotExist(certErr) && os.IsNotExist(keyErr) { + // The cert and the key do not exist + return false + } + + // Both files exist or one of them + return true +} + +// TryLoadCertAndKeyFromDisk tries to load a cert and a key from the disk and validates that they are valid +func TryLoadCertAndKeyFromDisk(pkiPath, name string) (*x509.Certificate, crypto.Signer, error) { + cert, err := TryLoadCertFromDisk(pkiPath, name) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to load certificate") + } + + key, err := TryLoadKeyFromDisk(pkiPath, name) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to load key") + } + + return cert, key, nil +} + +// TryLoadCertFromDisk tries to load the cert from the disk +func TryLoadCertFromDisk(pkiPath, name string) (*x509.Certificate, error) { + certificatePath := pathForCert(pkiPath, name) + + certs, err := certutil.CertsFromFile(certificatePath) + if err != nil { + return nil, errors.Wrapf(err, "couldn't load the certificate file %s", certificatePath) + } + + // We are only putting one certificate in the certificate pem file, so it's safe to just pick the first one + // TODO: Support multiple certs here in order to be able to rotate certs + cert := certs[0] + + return cert, nil +} + +// TryLoadCertChainFromDisk tries to load the cert chain from the 
disk +func TryLoadCertChainFromDisk(pkiPath, name string) (*x509.Certificate, []*x509.Certificate, error) { + certificatePath := pathForCert(pkiPath, name) + + certs, err := certutil.CertsFromFile(certificatePath) + if err != nil { + return nil, nil, errors.Wrapf(err, "couldn't load the certificate file %s", certificatePath) + } + + cert := certs[0] + intermediates := certs[1:] + + return cert, intermediates, nil +} + +// TryLoadKeyFromDisk tries to load the key from the disk and validates that it is valid +func TryLoadKeyFromDisk(pkiPath, name string) (crypto.Signer, error) { + privateKeyPath := pathForKey(pkiPath, name) + + // Parse the private key from a file + privKey, err := keyutil.PrivateKeyFromFile(privateKeyPath) + if err != nil { + return nil, errors.Wrapf(err, "couldn't load the private key file %s", privateKeyPath) + } + + // Allow RSA and ECDSA formats only + var key crypto.Signer + switch k := privKey.(type) { + case *rsa.PrivateKey: + key = k + case *ecdsa.PrivateKey: + key = k + default: + return nil, errors.Errorf("the private key file %s is neither in RSA nor ECDSA format", privateKeyPath) + } + + return key, nil +} + +// PathsForCertAndKey returns the paths for the certificate and key given the path and basename. +func PathsForCertAndKey(pkiPath, name string) (string, string) { + return pathForCert(pkiPath, name), pathForKey(pkiPath, name) +} + +func pathForCert(pkiPath, name string) string { + return filepath.Join(pkiPath, fmt.Sprintf("%s.crt", name)) +} + +func pathForKey(pkiPath, name string) string { + return filepath.Join(pkiPath, fmt.Sprintf("%s.key", name)) +} + +func pathForPublicKey(pkiPath, name string) string { + return filepath.Join(pkiPath, fmt.Sprintf("%s.pub", name)) +} + +// GetControlPlaneEndpoint returns a properly formatted endpoint for the control plane built according following rules: +// - If the controlPlaneEndpoint is defined, use it. 
// - if the controlPlaneEndpoint is defined but without a port number, the controlPlaneEndpoint + localEndpoint.BindPort is used.
// - Otherwise, in case the controlPlaneEndpoint is not defined, the localEndpoint.AdvertiseAddress + the localEndpoint.BindPort is used.
func GetControlPlaneEndpoint(controlPlaneEndpoint string, localEndpoint *APIEndpoint) (string, error) {
	// get the URL of the local endpoint
	localAPIEndpoint, err := GetLocalAPIEndpoint(localEndpoint)
	if err != nil {
		return "", err
	}

	// if the controlplane endpoint is defined
	if len(controlPlaneEndpoint) > 0 {
		// parse the controlplane endpoint
		var host, port string
		var err error
		if host, port, err = ParseHostPort(controlPlaneEndpoint); err != nil {
			return "", errors.Wrapf(err, "invalid value %q given for controlPlaneEndpoint", controlPlaneEndpoint)
		}

		// if a port is provided within the controlPlaneAddress warn the users we are using it, else use the bindport
		localEndpointPort := strconv.Itoa(int(localEndpoint.BindPort))
		if port != "" {
			if port != localEndpointPort {
				fmt.Println("[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address")
			}
		} else {
			port = localEndpointPort
		}

		// overrides the control-plane url using the controlPlaneAddress (and eventually the bindport)
		return formatURL(host, port).String(), nil
	}

	return localAPIEndpoint, nil
}

// parseAPIEndpoint parses an APIEndpoint and returns the AdvertiseAddress as net.IP and the BindPort as string.
// If the BindPort or AdvertiseAddress are invalid it returns an error.
func parseAPIEndpoint(localEndpoint *APIEndpoint) (net.IP, string, error) {
	// validate the bind port by round-tripping it through ParsePort
	bindPortString := strconv.Itoa(int(localEndpoint.BindPort))
	if _, err := ParsePort(bindPortString); err != nil {
		return nil, "", errors.Wrapf(err, "invalid value %q given for api.bindPort", localEndpoint.BindPort)
	}

	// parse the AdvertiseAddress; ParseIP returns nil for anything that is
	// not a literal IPv4/IPv6 address
	var ip = net.ParseIP(localEndpoint.AdvertiseAddress)
	if ip == nil {
		return nil, "", errors.Errorf("invalid value `%s` given for api.advertiseAddress", localEndpoint.AdvertiseAddress)
	}

	return ip, bindPortString, nil
}

// formatURL takes a host and a port string and creates a *url.URL using the https scheme
func formatURL(host, port string) *url.URL {
	return &url.URL{
		Scheme: "https",
		// JoinHostPort brackets IPv6 hosts correctly
		Host: net.JoinHostPort(host, port),
	}
}

// GetLocalAPIEndpoint parses an APIEndpoint and returns it as a string,
// or returns an error in case it cannot be parsed.
func GetLocalAPIEndpoint(localEndpoint *APIEndpoint) (string, error) {
	// get the URL of the local endpoint
	localEndpointIP, localEndpointPort, err := parseAPIEndpoint(localEndpoint)
	if err != nil {
		return "", err
	}
	url := formatURL(localEndpointIP.String(), localEndpointPort)
	return url.String(), nil
}

// GetKubernetesServiceCIDR returns the default Service CIDR for the Kubernetes internal service
func GetKubernetesServiceCIDR(svcSubnetList string) (*net.IPNet, error) {
	// The default service address family for the cluster is the address family of the first
	// service cluster IP range configured via the `--service-cluster-ip-range` flag
	// of the kube-controller-manager and kube-apiserver.
	svcSubnets, err := netutils.ParseCIDRs(strings.Split(svcSubnetList, ","))
	if err != nil {
		return nil, errors.Wrapf(err, "unable to parse ServiceSubnet %v", svcSubnetList)
	}
	if len(svcSubnets) == 0 {
		return nil, errors.New("received empty ServiceSubnet")
	}
	return svcSubnets[0], nil
}

// GetAPIServerVirtualIP returns the IP of the internal Kubernetes API service
func GetAPIServerVirtualIP(svcSubnetList string) (net.IP, error) {
	svcSubnet, err := GetKubernetesServiceCIDR(svcSubnetList)
	if err != nil {
		return nil, errors.Wrap(err, "unable to get internal Kubernetes Service IP from the given service CIDR")
	}
	// the API server virtual IP is by convention the first usable address of
	// the service CIDR (index 1)
	internalAPIServerVirtualIP, err := netutils.GetIndexedIP(svcSubnet, 1)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to get the first IP address from the given CIDR: %s", svcSubnet.String())
	}
	return internalAPIServerVirtualIP, nil
}

// GetAPIServerAltNames builds an AltNames object to be used when generating apiserver certificate
func GetAPIServerAltNames(cfg *InitConfiguration) (*certutil.AltNames, error) {
	// advertise address
	advertiseAddress := net.ParseIP(cfg.LocalAPIEndpoint.AdvertiseAddress)
	if advertiseAddress == nil {
		return nil, errors.Errorf("error parsing LocalAPIEndpoint AdvertiseAddress %v: is not a valid textual representation of an IP address",
			cfg.LocalAPIEndpoint.AdvertiseAddress)
	}

	internalAPIServerVirtualIP, err := GetAPIServerVirtualIP(cfg.Networking.ServiceSubnet)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to get first IP address from the given CIDR: %v", cfg.Networking.ServiceSubnet)
	}

	// create AltNames with defaults DNSNames/IPs
	altNames := &certutil.AltNames{
		DNSNames: []string{
			cfg.NodeRegistration.Name,
			"kubernetes",
			"kubernetes.default",
			"kubernetes.default.svc",
			fmt.Sprintf("kubernetes.default.svc.%s", cfg.Networking.DNSDomain),
		},
		IPs: []net.IP{
			internalAPIServerVirtualIP,
			advertiseAddress,
		},
	}

	// add cluster controlPlaneEndpoint if present (dns or ip)
	if len(cfg.ControlPlaneEndpoint) > 0 {
		if host, _, err := ParseHostPort(cfg.ControlPlaneEndpoint); err == nil {
			if ip := net.ParseIP(host); ip != nil {
				altNames.IPs = append(altNames.IPs, ip)
			} else {
				altNames.DNSNames = append(altNames.DNSNames, host)
			}
		} else {
			return nil, errors.Wrapf(err, "error parsing cluster controlPlaneEndpoint %q", cfg.ControlPlaneEndpoint)
		}
	}

	appendSANsToAltNames(altNames, cfg.APIServer.CertSANs, APIServerCertName)

	return altNames, nil
}

// ParseHostPort parses a network address of the form "host:port", "ipv4:port", "[ipv6]:port" into host and port;
// ":port" can be eventually omitted.
// If the string is not a valid representation of network address, ParseHostPort returns an error.
func ParseHostPort(hostport string) (string, string, error) {
	var host, port string
	var err error

	// try to split host and port
	if host, port, err = net.SplitHostPort(hostport); err != nil {
		// if SplitHostPort returns an error, the entire hostport is considered as host
		host = hostport
	}

	// if port is defined, parse and validate it
	if port != "" {
		if _, err := ParsePort(port); err != nil {
			return "", "", errors.Errorf("hostport %s: port %s must be a valid number between 1 and 65535, inclusive", hostport, port)
		}
	}

	// if host is a valid IP, returns it
	if ip := net.ParseIP(host); ip != nil {
		return host, port, nil
	}

	// if host is a valid RFC-1123 subdomain, returns it
	if errs := validation.IsDNS1123Subdomain(host); len(errs) == 0 {
		return host, port, nil
	}

	return "", "", errors.Errorf("hostport %s: host '%s' must be a valid IP address or a valid RFC-1123 DNS subdomain", hostport, host)
}

// ParsePort parses a string representing a TCP port.
// If the string is not a valid representation of a TCP port, ParsePort returns an error.
func ParsePort(port string) (int, error) {
	// the explicit range check below restricts the accepted values to 1-65535
	portInt, err := netutils.ParsePort(port, true)
	if err == nil && (1 <= portInt && portInt <= 65535) {
		return portInt, nil
	}

	return 0, errors.New("port must be a valid number between 1 and 65535, inclusive")
}

// GetEtcdAltNames builds an AltNames object for generating the etcd server certificate.
// `advertise address` and localhost are included in the SAN since this is the interfaces the etcd static pod listens on.
// The user can override the listen address with `Etcd.ExtraArgs` and add SANs with `Etcd.ServerCertSANs`.
func GetEtcdAltNames(cfg *InitConfiguration) (*certutil.AltNames, error) {
	return getAltNames(cfg, EtcdServerCertName)
}

// GetEtcdPeerAltNames builds an AltNames object for generating the etcd peer certificate.
// Hostname and `API.AdvertiseAddress` are included if the user chooses to promote the single node etcd cluster into a multi-node one (stacked etcd).
// The user can override the listen address with `Etcd.ExtraArgs` and add SANs with `Etcd.PeerCertSANs`.
func GetEtcdPeerAltNames(cfg *InitConfiguration) (*certutil.AltNames, error) {
	return getAltNames(cfg, EtcdPeerCertName)
}

// getAltNames builds an AltNames object with the cfg and certName.
func getAltNames(cfg *InitConfiguration, certName string) (*certutil.AltNames, error) {
	// advertise address
	advertiseAddress := net.ParseIP(cfg.LocalAPIEndpoint.AdvertiseAddress)
	if advertiseAddress == nil {
		return nil, errors.Errorf("error parsing LocalAPIEndpoint AdvertiseAddress %v: is not a valid textual representation of an IP address",
			cfg.LocalAPIEndpoint.AdvertiseAddress)
	}

	// create AltNames with defaults DNSNames/IPs (the node name, localhost and
	// both IPv4/IPv6 loopback addresses are always included)
	altNames := &certutil.AltNames{
		DNSNames: []string{cfg.NodeRegistration.Name, "localhost"},
		IPs:      []net.IP{advertiseAddress, net.IPv4(127, 0, 0, 1), net.IPv6loopback},
	}

	// add the user-configured extra SANs for the matching cert
	if cfg.Etcd.Local != nil {
		if certName == EtcdServerCertName {
			appendSANsToAltNames(altNames, cfg.Etcd.Local.ServerCertSANs, EtcdServerCertName)
		} else if certName == EtcdPeerCertName {
			appendSANsToAltNames(altNames, cfg.Etcd.Local.PeerCertSANs, EtcdPeerCertName)
		}
	}
	return altNames, nil
}

// appendSANsToAltNames parses SANs from as list of strings and adds them to altNames for use on a specific cert
// altNames is passed in with a pointer, and the struct is modified
// valid IP address strings are parsed and added to altNames.IPs as net.IP's
// RFC-1123 compliant DNS strings are added to altNames.DNSNames as strings
// RFC-1123 compliant wildcard DNS strings are added to altNames.DNSNames as strings
// certNames is used to print user facing warnings and should be the name of the cert the altNames will be used for
func appendSANsToAltNames(altNames *certutil.AltNames, SANs []string, certName string) {
	for _, altname := range SANs {
		if ip := net.ParseIP(altname); ip != nil {
			altNames.IPs = append(altNames.IPs, ip)
		} else if len(validation.IsDNS1123Subdomain(altname)) == 0 {
			altNames.DNSNames = append(altNames.DNSNames, altname)
		} else if len(validation.IsWildcardDNS1123Subdomain(altname)) == 0 {
			altNames.DNSNames = append(altNames.DNSNames, altname)
		} else {
			// invalid entries are skipped with a warning instead of failing
			fmt.Printf(
				"[certificates] WARNING: '%s' was not added to the '%s' SAN, because it is not a valid IP or RFC-1123 compliant DNS entry\n",
				altname,
				certName,
			)
		}
	}
}

// EncodeCertPEM returns PEM-encoded certificate data
func EncodeCertPEM(cert *x509.Certificate) []byte {
	block := pem.Block{
		Type:  CertificateBlockType,
		Bytes: cert.Raw,
	}
	return pem.EncodeToMemory(&block)
}

// EncodePublicKeyPEM returns PEM-encoded public data
func EncodePublicKeyPEM(key crypto.PublicKey) ([]byte, error) {
	der, err := x509.MarshalPKIXPublicKey(key)
	if err != nil {
		return []byte{}, err
	}
	block := pem.Block{
		Type:  PublicKeyBlockType,
		Bytes: der,
	}
	return pem.EncodeToMemory(&block), nil
}

// NewPrivateKey returns a new private key.
var NewPrivateKey = GeneratePrivateKey

// GeneratePrivateKey creates an ECDSA P-256 key when keyType is x509.ECDSA,
// otherwise an RSA key of rsaKeySize bits.
func GeneratePrivateKey(keyType x509.PublicKeyAlgorithm) (crypto.Signer, error) {
	if keyType == x509.ECDSA {
		return ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
	}

	return rsa.GenerateKey(cryptorand.Reader, rsaKeySize)
}

// NewSignedCert creates a signed certificate using the given CA certificate and key
func NewSignedCert(cfg *CertConfig, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer, isCA bool) (*x509.Certificate, error) {
	// random serial number
	serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64))
	if err != nil {
		return nil, err
	}
	if len(cfg.CommonName) == 0 {
		return nil, errors.New("must specify a CommonName")
	}

	keyUsage := x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature
	if isCA {
		keyUsage |= x509.KeyUsageCertSign
	}

	RemoveDuplicateAltNames(&cfg.AltNames)

	// default validity, overridable via cfg.NotAfter
	notAfter := time.Now().Add(CertificateValidity).UTC()
	if cfg.NotAfter != nil {
		notAfter = *cfg.NotAfter
	}

	certTmpl := x509.Certificate{
		Subject: pkix.Name{
			CommonName:   cfg.CommonName,
			Organization: cfg.Organization,
		},
		DNSNames:     cfg.AltNames.DNSNames,
		IPAddresses:  cfg.AltNames.IPs,
		SerialNumber: serial,
		// NotBefore is inherited from the CA so the leaf is never valid
		// before its issuer
		NotBefore:             caCert.NotBefore,
		NotAfter:              notAfter,
		KeyUsage:              keyUsage,
		ExtKeyUsage:           cfg.Usages,
		BasicConstraintsValid: true,
		IsCA:                  isCA,
	}
	certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey)
	if err != nil {
		return nil, err
	}
	return x509.ParseCertificate(certDERBytes)
}

// RemoveDuplicateAltNames removes duplicate items in altNames.
func RemoveDuplicateAltNames(altNames *certutil.AltNames) {
	if altNames == nil {
		return
	}

	if altNames.DNSNames != nil {
		altNames.DNSNames = sets.NewString(altNames.DNSNames...).List()
	}

	// deduplicate IPs by string form while preserving first-seen order
	ipsKeys := make(map[string]struct{})
	var ips []net.IP
	for _, one := range altNames.IPs {
		if _, ok := ipsKeys[one.String()]; !ok {
			ipsKeys[one.String()] = struct{}{}
			ips = append(ips, one)
		}
	}
	altNames.IPs = ips
}

// ValidateCertPeriod checks if the certificate is valid relative to the current time
// (+/- offset)
func ValidateCertPeriod(cert *x509.Certificate, offset time.Duration) error {
	period := fmt.Sprintf("NotBefore: %v, NotAfter: %v", cert.NotBefore, cert.NotAfter)
	now := time.Now().Add(offset)
	if now.Before(cert.NotBefore) {
		return errors.Errorf("the certificate is not valid yet: %s", period)
	}
	if now.After(cert.NotAfter) {
		return errors.Errorf("the certificate has expired: %s", period)
	}
	return nil
}

// VerifyCertChain verifies that a certificate has a valid chain of
// intermediate CAs back to the root CA
func VerifyCertChain(cert *x509.Certificate, intermediates []*x509.Certificate, root *x509.Certificate) error {
	rootPool := x509.NewCertPool()
	rootPool.AddCert(root)

	intermediatePool := x509.NewCertPool()
	for _, c := range intermediates {
		intermediatePool.AddCert(c)
	}

	verifyOptions := x509.VerifyOptions{
		Roots:         rootPool,
		Intermediates: intermediatePool,
		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
	}

	if _, err := cert.Verify(verifyOptions); err != nil {
		return err
	}

	return nil
}
From
56d3e8907f00017254f4005364ecdc1b4508a57b Mon Sep 17 00:00:00 2001 From: fabiankramm Date: Thu, 25 Nov 2021 16:22:48 +0100 Subject: [PATCH 2/3] feat: add k8s distro --- charts/k8s/.helmignore | 21 +++ charts/k8s/Chart.yaml | 15 ++ charts/k8s/templates/NOTES.txt | 8 + charts/k8s/templates/_helpers.tpl | 59 ++++++ charts/k8s/templates/api-deployment.yaml | 124 +++++++++++++ charts/k8s/templates/api-service.yaml | 22 +++ .../k8s/templates/controller-deployment.yaml | 116 ++++++++++++ charts/k8s/templates/etcd-service.yaml | 22 +++ .../templates/etcd-statefulset-service.yaml | 22 +++ charts/k8s/templates/etcd-statefulset.yaml | 133 +++++++++++++ charts/k8s/templates/ingress.yaml | 26 +++ .../k8s/templates/pre-install-hook-job.yaml | 45 +++++ .../k8s/templates/pre-install-hook-role.yaml | 15 ++ .../pre-install-hook-rolebinding.yaml | 19 ++ .../pre-install-hook-serviceaccount.yaml | 11 ++ charts/k8s/templates/rbac/clusterrole.yaml | 27 +++ .../templates/rbac/clusterrolebinding.yaml | 23 +++ charts/k8s/templates/rbac/role.yaml | 30 +++ charts/k8s/templates/rbac/rolebinding.yaml | 24 +++ charts/k8s/templates/serviceaccount.yaml | 16 ++ charts/k8s/templates/syncer-deployment.yaml | 119 ++++++++++++ charts/k8s/templates/syncer-service.yaml | 20 ++ charts/k8s/values.yaml | 161 ++++++++++++++++ cmd/vcluster/cmd/certs.go | 174 ++++++++++++++++++ cmd/vcluster/cmd/root.go | 1 + .../cmd/app/create/values/default.go | 4 +- pkg/certs/constants.go | 45 ++++- pkg/certs/kubeconfig.go | 7 - 28 files changed, 1292 insertions(+), 17 deletions(-) create mode 100644 charts/k8s/.helmignore create mode 100644 charts/k8s/Chart.yaml create mode 100644 charts/k8s/templates/NOTES.txt create mode 100644 charts/k8s/templates/_helpers.tpl create mode 100644 charts/k8s/templates/api-deployment.yaml create mode 100644 charts/k8s/templates/api-service.yaml create mode 100644 charts/k8s/templates/controller-deployment.yaml create mode 100644 charts/k8s/templates/etcd-service.yaml create mode 100644 
charts/k8s/templates/etcd-statefulset-service.yaml create mode 100644 charts/k8s/templates/etcd-statefulset.yaml create mode 100644 charts/k8s/templates/ingress.yaml create mode 100644 charts/k8s/templates/pre-install-hook-job.yaml create mode 100644 charts/k8s/templates/pre-install-hook-role.yaml create mode 100644 charts/k8s/templates/pre-install-hook-rolebinding.yaml create mode 100644 charts/k8s/templates/pre-install-hook-serviceaccount.yaml create mode 100644 charts/k8s/templates/rbac/clusterrole.yaml create mode 100644 charts/k8s/templates/rbac/clusterrolebinding.yaml create mode 100644 charts/k8s/templates/rbac/role.yaml create mode 100644 charts/k8s/templates/rbac/rolebinding.yaml create mode 100644 charts/k8s/templates/serviceaccount.yaml create mode 100644 charts/k8s/templates/syncer-deployment.yaml create mode 100644 charts/k8s/templates/syncer-service.yaml create mode 100644 charts/k8s/values.yaml create mode 100644 cmd/vcluster/cmd/certs.go diff --git a/charts/k8s/.helmignore b/charts/k8s/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/k8s/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/k8s/Chart.yaml b/charts/k8s/Chart.yaml new file mode 100644 index 000000000..69f696627 --- /dev/null +++ b/charts/k8s/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v2 +name: vcluster-k8s +description: vcluster - Virtual Kubernetes Clusters (k8s) + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. 
+# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +version: 0.0.1 # version is auto-generated by release pipeline diff --git a/charts/k8s/templates/NOTES.txt b/charts/k8s/templates/NOTES.txt new file mode 100644 index 000000000..c32a7790f --- /dev/null +++ b/charts/k8s/templates/NOTES.txt @@ -0,0 +1,8 @@ +Thank you for installing {{ .Chart.Name }}. + +Your release is named {{ .Release.Name }}. + +To learn more about the release, try: + + $ helm status {{ .Release.Name }} + $ helm get all {{ .Release.Name }} \ No newline at end of file diff --git a/charts/k8s/templates/_helpers.tpl b/charts/k8s/templates/_helpers.tpl new file mode 100644 index 000000000..fe19f0d60 --- /dev/null +++ b/charts/k8s/templates/_helpers.tpl @@ -0,0 +1,59 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "vcluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
*/}}
{{- define "vcluster.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Cluster role name, unique per release and namespace (names are cluster-scoped).
*/}}
{{- define "vcluster.clusterRoleName" -}}
{{- printf "vc-%s-v-%s" .Release.Name .Release.Namespace | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "vcluster.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "vcluster.labels" -}}
app.kubernetes.io/name: {{ include "vcluster.name" . }}
helm.sh/chart: {{ include "vcluster.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- else }}
app.kubernetes.io/version: {{ .Chart.Version | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

{{/*
Get a pseudo-random admin access key derived from the install timestamp.
(Removed a stray `{{- $}}` action that served no purpose here.)
*/}}
{{- define "vcluster.admin.accessKey" -}}
{{- now | unixEpoch | toString | trunc 8 | sha256sum -}}
{{- end -}}
\ No newline at end of file
diff --git a/charts/k8s/templates/api-deployment.yaml b/charts/k8s/templates/api-deployment.yaml
new file mode 100644
index 000000000..37bd28898
--- /dev/null
+++ b/charts/k8s/templates/api-deployment.yaml
@@ -0,0 +1,124 @@
{{- if not .Values.api.disabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-api
  namespace: {{ .Release.Namespace }}
  labels:
    app: vcluster-api
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
{{- if .Values.api.labels }}
{{ toYaml
.Values.api.labels | indent 4 }} +{{- end }} + {{- if .Values.api.annotations }} + annotations: +{{ toYaml .Values.api.annotations | indent 4 }} + {{- end }} +spec: + replicas: {{ .Values.api.replicas }} + strategy: + rollingUpdate: + maxSurge: 1 + {{- if (eq (int .Values.api.replicas) 1) }} + maxUnavailable: 0 + {{- else }} + maxUnavailable: 1 + {{- end }} + type: RollingUpdate + selector: + matchLabels: + app: vcluster-api + release: {{ .Release.Name }} + template: + metadata: + labels: + app: vcluster-api + release: {{ .Release.Name }} + spec: + terminationGracePeriodSeconds: 10 + nodeSelector: +{{ toYaml .Values.api.nodeSelector | indent 8 }} + affinity: +{{ toYaml .Values.api.affinity | indent 8 }} + tolerations: +{{ toYaml .Values.api.tolerations | indent 8 }} + automountServiceAccountToken: false + volumes: + - name: certs + secret: + secretName: {{ .Release.Name }}-certs + {{- if .Values.api.volumes }} +{{ toYaml .Values.api.volumes | indent 8 }} + {{- end }} + containers: + - name: kube-apiserver + image: "{{ .Values.api.image }}" + command: + - kube-apiserver + - '--advertise-address=0.0.0.0' + - '--allow-privileged=true' + - '--authorization-mode=Node,RBAC' + - '--client-ca-file=/run/config/pki/ca.crt' + - '--enable-admission-plugins=NodeRestriction' + - '--enable-bootstrap-token-auth=true' + - '--etcd-cafile=/run/config/pki/etcd-ca.crt' + - '--etcd-certfile=/run/config/pki/apiserver-etcd-client.crt' + - '--etcd-keyfile=/run/config/pki/apiserver-etcd-client.key' + - '--etcd-servers=https://{{ .Release.Name }}-etcd:2379' + - '--insecure-port=0' + - '--kubelet-client-certificate=/run/config/pki/apiserver-kubelet-client.crt' + - '--kubelet-client-key=/run/config/pki/apiserver-kubelet-client.key' + - '--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname' + - '--proxy-client-cert-file=/run/config/pki/front-proxy-client.crt' + - '--proxy-client-key-file=/run/config/pki/front-proxy-client.key' + - '--requestheader-allowed-names=front-proxy-client' + 
- '--requestheader-client-ca-file=/run/config/pki/front-proxy-ca.crt' + - '--requestheader-extra-headers-prefix=X-Remote-Extra-' + - '--requestheader-group-headers=X-Remote-Group' + - '--requestheader-username-headers=X-Remote-User' + - '--secure-port=6443' + - '--service-account-issuer=https://kubernetes.default.svc.cluster.local' + - '--service-account-key-file=/run/config/pki/sa.pub' + - '--service-account-signing-key-file=/run/config/pki/sa.key' + - '--service-cluster-ip-range={{ .Values.serviceCIDR }}' + - '--tls-cert-file=/run/config/pki/apiserver.crt' + - '--tls-private-key-file=/run/config/pki/apiserver.key' + - '--watch-cache=false' + {{- range $f := .Values.api.extraArgs }} + - {{ $f | quote }} + {{- end }} + livenessProbe: + httpGet: + path: /livez + port: 6443 + scheme: HTTPS + initialDelaySeconds: 10 + timeoutSeconds: 15 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 8 + readinessProbe: + httpGet: + path: /readyz + port: 6443 + scheme: HTTPS + timeoutSeconds: 15 + periodSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + securityContext: +{{ toYaml .Values.api.securityContext | indent 10 }} + env: +{{ toYaml .Values.api.env | indent 10 }} + volumeMounts: + - mountPath: /run/config/pki + name: certs + readOnly: true + {{- if .Values.api.volumeMounts }} +{{ toYaml .Values.api.volumeMounts | indent 10 }} + {{- end }} + resources: +{{ toYaml .Values.api.resources | indent 10 }} +{{- end }} \ No newline at end of file diff --git a/charts/k8s/templates/api-service.yaml b/charts/k8s/templates/api-service.yaml new file mode 100644 index 000000000..48ebed294 --- /dev/null +++ b/charts/k8s/templates/api-service.yaml @@ -0,0 +1,22 @@ +{{- if not .Values.api.disabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }}-api + namespace: {{ .Release.Namespace }} + labels: + app: vcluster-api + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: 
ClusterIP + ports: + - name: https + port: 443 + targetPort: 6443 + protocol: TCP + selector: + app: vcluster-api + release: {{ .Release.Name }} +{{- end }} \ No newline at end of file diff --git a/charts/k8s/templates/controller-deployment.yaml b/charts/k8s/templates/controller-deployment.yaml new file mode 100644 index 000000000..137697305 --- /dev/null +++ b/charts/k8s/templates/controller-deployment.yaml @@ -0,0 +1,116 @@ +{{- if not .Values.controller.disabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-controller + namespace: {{ .Release.Namespace }} + labels: + app: vcluster-controller + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.controller.labels }} +{{ toYaml .Values.controller.labels | indent 4 }} +{{- end }} + {{- if .Values.controller.annotations }} + annotations: +{{ toYaml .Values.controller.annotations | indent 4 }} + {{- end }} +spec: + replicas: {{ .Values.controller.replicas }} + strategy: + rollingUpdate: + maxSurge: 1 + {{- if (eq (int .Values.controller.replicas) 1) }} + maxUnavailable: 0 + {{- else }} + maxUnavailable: 1 + {{- end }} + type: RollingUpdate + selector: + matchLabels: + app: vcluster-controller + release: {{ .Release.Name }} + template: + metadata: + labels: + app: vcluster-controller + release: {{ .Release.Name }} + spec: + terminationGracePeriodSeconds: 10 + nodeSelector: +{{ toYaml .Values.controller.nodeSelector | indent 8 }} + affinity: +{{ toYaml .Values.controller.affinity | indent 8 }} + tolerations: +{{ toYaml .Values.controller.tolerations | indent 8 }} + automountServiceAccountToken: false + volumes: + - name: certs + secret: + secretName: {{ .Release.Name }}-certs + {{- if .Values.controller.volumes }} +{{ toYaml .Values.controller.volumes | indent 8 }} + {{- end }} + containers: + - name: kube-controller-manager + image: "{{ .Values.controller.image }}" + command: + - kube-controller-manager 
+ - '--authentication-kubeconfig=/run/config/pki/controller-manager.conf' + - '--authorization-kubeconfig=/run/config/pki/controller-manager.conf' + - '--bind-address=0.0.0.0' + - '--client-ca-file=/run/config/pki/ca.crt' + - '--cluster-name=kubernetes' + - '--cluster-signing-cert-file=/run/config/pki/ca.crt' + - '--cluster-signing-key-file=/run/config/pki/ca.key' + - '--controllers=*,-nodeipam,-nodelifecycle,-persistentvolume-binder,-attachdetach,-persistentvolume-expander,-cloud-node-lifecycle' + - '--horizontal-pod-autoscaler-sync-period=60s' + - '--kubeconfig=/run/config/pki/controller-manager.conf' + - '--service-cluster-ip-range={{ .Values.serviceCIDR }}' + - '--leader-elect=false' + - '--node-monitor-grace-period=180s' + - '--node-monitor-period=30s' + - '--port=0' + - '--pvclaimbinder-sync-period=60s' + - '--requestheader-client-ca-file=/run/config/pki/front-proxy-ca.crt' + - '--root-ca-file=/run/config/pki/ca.crt' + - '--service-account-private-key-file=/run/config/pki/sa.key' + - '--use-service-account-credentials=true' + {{- range $f := .Values.controller.extraArgs }} + - {{ $f | quote }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: 10257 + scheme: HTTPS + initialDelaySeconds: 10 + timeoutSeconds: 15 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 8 + startupProbe: + httpGet: + path: /healthz + port: 10257 + scheme: HTTPS + initialDelaySeconds: 10 + timeoutSeconds: 15 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 24 + securityContext: +{{ toYaml .Values.controller.securityContext | indent 10 }} + env: +{{ toYaml .Values.controller.env | indent 10 }} + volumeMounts: + - mountPath: /run/config/pki + name: certs + readOnly: true + {{- if .Values.controller.volumeMounts }} +{{ toYaml .Values.controller.volumeMounts | indent 10 }} + {{- end }} + resources: +{{ toYaml .Values.controller.resources | indent 10 }} +{{- end }} \ No newline at end of file diff --git a/charts/k8s/templates/etcd-service.yaml 
b/charts/k8s/templates/etcd-service.yaml new file mode 100644 index 000000000..6d8272f78 --- /dev/null +++ b/charts/k8s/templates/etcd-service.yaml @@ -0,0 +1,22 @@ +{{- if not .Values.etcd.disabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }}-etcd + namespace: {{ .Release.Namespace }} + labels: + app: vcluster-etcd + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: ClusterIP + ports: + - name: etcd + port: 2379 + targetPort: 2379 + protocol: TCP + selector: + app: vcluster-etcd + release: {{ .Release.Name }} + {{- end }} \ No newline at end of file diff --git a/charts/k8s/templates/etcd-statefulset-service.yaml b/charts/k8s/templates/etcd-statefulset-service.yaml new file mode 100644 index 000000000..984e13bf3 --- /dev/null +++ b/charts/k8s/templates/etcd-statefulset-service.yaml @@ -0,0 +1,22 @@ +{{- if not .Values.etcd.disabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }}-etcd-headless + namespace: {{ .Release.Namespace }} + labels: + app: vcluster-etcd + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + ports: + - name: etcd + port: 2379 + targetPort: 2379 + protocol: TCP + clusterIP: None + selector: + app: vcluster-etcd + release: "{{ .Release.Name }}" +{{- end }} \ No newline at end of file diff --git a/charts/k8s/templates/etcd-statefulset.yaml b/charts/k8s/templates/etcd-statefulset.yaml new file mode 100644 index 000000000..ca815a622 --- /dev/null +++ b/charts/k8s/templates/etcd-statefulset.yaml @@ -0,0 +1,133 @@ +{{- if not .Values.etcd.disabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ .Release.Name }}-etcd + namespace: {{ .Release.Namespace }} + labels: + app: vcluster-etcd + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if 
.Values.etcd.labels }}
{{ toYaml .Values.etcd.labels | indent 4 }}
{{- end }}
  {{- if .Values.etcd.annotations }}
  annotations:
{{ toYaml .Values.etcd.annotations | indent 4 }}
  {{- end }}
spec:
  serviceName: {{ .Release.Name }}-etcd-headless
  replicas: {{ .Values.etcd.replicas }}
  selector:
    matchLabels:
      app: vcluster-etcd
      release: {{ .Release.Name }}
  {{- if .Values.etcd.storage.persistence }}
  {{- if not .Values.etcd.storage.volumeClaimTemplates }}
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: {{ .Values.etcd.storage.className }}
        resources:
          requests:
            storage: {{ .Values.etcd.storage.size }}
  {{- else }}
  volumeClaimTemplates:
# fix: render the same value the guard above checks (.Values.etcd.storage.volumeClaimTemplates),
# previously .Values.etcd.volumeClaimTemplates which is never set
{{ toYaml .Values.etcd.storage.volumeClaimTemplates | indent 4 }}
  {{- end }}
  {{- end }}
  template:
    metadata:
      labels:
        app: vcluster-etcd
        release: {{ .Release.Name }}
    spec:
      terminationGracePeriodSeconds: 10
      nodeSelector:
{{ toYaml .Values.etcd.nodeSelector | indent 8 }}
      affinity:
{{ toYaml .Values.etcd.affinity | indent 8 }}
      tolerations:
{{ toYaml .Values.etcd.tolerations | indent 8 }}
      automountServiceAccountToken: false
      volumes:
        - name: certs
          secret:
            secretName: {{ .Release.Name }}-certs
        {{- /* fix: guard on .Values.etcd.volumes to match the value rendered below (was .Values.volumes) */}}
        {{- if .Values.etcd.volumes }}
{{ toYaml .Values.etcd.volumes | indent 8 }}
        {{- end }}
        {{- if not .Values.etcd.storage.persistence }}
        - name: data
          emptyDir: {}
        {{- end }}
      containers:
      - name: etcd
        image: "{{ .Values.etcd.image }}"
        command:
          - etcd
          - '--advertise-client-urls=https://$(NAME).{{ .Release.Name }}-etcd.{{ .Release.Namespace }}:2379'
          - '--cert-file=/run/config/pki/etcd-server.crt'
          - '--client-cert-auth=true'
          - '--data-dir=/var/lib/etcd'
          - '--initial-advertise-peer-urls=https://$(NAME).{{ .Release.Name }}-etcd.{{ .Release.Namespace }}:2380'
          - '--initial-cluster=$(NAME)=https://$(NAME).{{ .Release.Name }}-etcd.{{ .Release.Namespace }}:2380'
          - '--key-file=/run/config/pki/etcd-server.key'
          # fix: listen on all interfaces (0.0.0.0); 0.0.0.1 is not a valid bind address
          - '--listen-client-urls=https://0.0.0.0:2379,https://$(NAME).{{ .Release.Name }}-etcd.{{ .Release.Namespace }}:2379'
          - '--listen-metrics-urls=http://0.0.0.0:2381'
          - '--listen-peer-urls=https://$(NAME).{{ .Release.Name }}-etcd.{{ .Release.Namespace }}:2380'
          - '--name=$(NAME)'
          - '--peer-cert-file=/run/config/pki/etcd-peer.crt'
          - '--peer-client-cert-auth=true'
          - '--peer-key-file=/run/config/pki/etcd-peer.key'
          - '--peer-trusted-ca-file=/run/config/pki/etcd-ca.crt'
          - '--snapshot-count=10000'
          - '--trusted-ca-file=/run/config/pki/etcd-ca.crt'
          {{- range $f := .Values.etcd.extraArgs }}
          - {{ $f | quote }}
          {{- end }}
        livenessProbe:
          httpGet:
            path: /health
            port: 2381
            scheme: HTTP
          initialDelaySeconds: 10
          timeoutSeconds: 15
          periodSeconds: 10
          successThreshold: 1
          failureThreshold: 8
        startupProbe:
          httpGet:
            path: /health
            port: 2381
            scheme: HTTP
          initialDelaySeconds: 10
          timeoutSeconds: 15
          periodSeconds: 10
          successThreshold: 1
          failureThreshold: 24
        securityContext:
{{ toYaml .Values.etcd.securityContext | indent 10 }}
        env:
          - name: NAME
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
          {{- if .Values.etcd.env }}
{{ toYaml .Values.etcd.env | indent 10 }}
          {{- end }}
        volumeMounts:
          - name: data
            mountPath: /var/lib/etcd
          - mountPath: /run/config/pki
            name: certs
            readOnly: true
          {{- if .Values.etcd.volumeMounts }}
{{ toYaml .Values.etcd.volumeMounts | indent 10 }}
          {{- end }}
        resources:
{{ toYaml .Values.etcd.resources | indent 10 }}
{{- end }}
\ No newline at end of file
diff --git a/charts/k8s/templates/ingress.yaml b/charts/k8s/templates/ingress.yaml
new file mode 100644
index 000000000..0705358bb
--- /dev/null
+++ b/charts/k8s/templates/ingress.yaml
@@ -0,0 +1,26 @@
{{- if .Values.ingress.enabled }}
apiVersion: {{ .Values.ingress.apiVersion }}
kind: Ingress
metadata:
  {{- if .Values.ingress.annotations }}
  annotations:
    {{- toYaml .Values.ingress.annotations | nindent 4 }}
  {{- end }}
name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} +spec: + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + - host: {{ .Values.ingress.host | quote }} + http: + paths: + - backend: + service: + name: {{ .Release.Name }} + port: + name: https + path: / + pathType: {{ .Values.ingress.pathType }} +{{- end }} diff --git a/charts/k8s/templates/pre-install-hook-job.yaml b/charts/k8s/templates/pre-install-hook-job.yaml new file mode 100644 index 000000000..38d68e0c1 --- /dev/null +++ b/charts/k8s/templates/pre-install-hook-job.yaml @@ -0,0 +1,45 @@ +{{- if .Values.preInstall.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }}-pre-install + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-weight": "3" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + backoffLimit: 3 + template: + metadata: + name: {{ .Release.Name }}-pre-install + spec: + serviceAccountName: {{ .Release.Name }}-pre-install + restartPolicy: OnFailure + nodeSelector: +{{ toYaml .Values.preInstall.nodeSelector | indent 8 }} + affinity: +{{ toYaml .Values.preInstall.affinity | indent 8 }} + tolerations: +{{ toYaml .Values.preInstall.tolerations | indent 8 }} + containers: + - name: certs + {{- if .Values.syncer.image }} + image: "{{ .Values.syncer.image }}" + {{- else }} + image: "loftsh/vcluster:{{ .Chart.Version }}" + {{- end }} + imagePullPolicy: IfNotPresent + securityContext: + runAsUser: 0 + command: + - /vcluster + - certs + args: + - --prefix={{ .Release.Name }} + {{- if .Values.serviceCIDR }} + - --service-cidr={{ .Values.serviceCIDR }} + {{- end }} + resources: +{{ toYaml .Values.etcd.resources | indent 12 }} +{{- end }} \ No newline at end of file diff --git a/charts/k8s/templates/pre-install-hook-role.yaml b/charts/k8s/templates/pre-install-hook-role.yaml new file mode 100644 index 
000000000..2bc7869bc --- /dev/null +++ b/charts/k8s/templates/pre-install-hook-role.yaml @@ -0,0 +1,15 @@ +{{- if .Values.preInstall.enabled }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }}-pre-install + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded,hook-failed +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["create", "get", "list"] +{{- end }} \ No newline at end of file diff --git a/charts/k8s/templates/pre-install-hook-rolebinding.yaml b/charts/k8s/templates/pre-install-hook-rolebinding.yaml new file mode 100644 index 000000000..a96015ada --- /dev/null +++ b/charts/k8s/templates/pre-install-hook-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.preInstall.enabled }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }}-pre-install + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded,hook-failed +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }}-pre-install + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: {{ .Release.Name }}-pre-install + apiGroup: rbac.authorization.k8s.io +{{- end }} \ No newline at end of file diff --git a/charts/k8s/templates/pre-install-hook-serviceaccount.yaml b/charts/k8s/templates/pre-install-hook-serviceaccount.yaml new file mode 100644 index 000000000..dd1fe742b --- /dev/null +++ b/charts/k8s/templates/pre-install-hook-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.preInstall.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }}-pre-install + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": 
before-hook-creation,hook-succeeded,hook-failed +{{- end }} \ No newline at end of file diff --git a/charts/k8s/templates/rbac/clusterrole.yaml b/charts/k8s/templates/rbac/clusterrole.yaml new file mode 100644 index 000000000..bfda471c9 --- /dev/null +++ b/charts/k8s/templates/rbac/clusterrole.yaml @@ -0,0 +1,27 @@ +{{- if .Values.rbac.clusterRole.create }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "vcluster.clusterRoleName" . }} + labels: + app: vcluster + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +rules: + - apiGroups: [""] + resources: ["nodes", "nodes/status"] + verbs: ["get", "watch", "list", "update", "patch"] + - apiGroups: [""] + resources: ["pods", "nodes/proxy", "nodes/metrics", "nodes/stats"] + verbs: ["get", "watch", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["create", "delete", "patch", "update", "get", "watch", "list"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "watch", "list"] + - apiGroups: ["scheduling.k8s.io"] + resources: ["priorityclasses"] + verbs: ["create", "delete", "patch", "update", "get", "list", "watch"] +{{- end }} \ No newline at end of file diff --git a/charts/k8s/templates/rbac/clusterrolebinding.yaml b/charts/k8s/templates/rbac/clusterrolebinding.yaml new file mode 100644 index 000000000..44606a040 --- /dev/null +++ b/charts/k8s/templates/rbac/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if .Values.rbac.clusterRole.create }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "vcluster.clusterRoleName" . 
}} + labels: + app: vcluster + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +subjects: + - kind: ServiceAccount + {{- if .Values.serviceAccount.name }} + name: {{ .Values.serviceAccount.name }} + {{- else }} + name: vc-{{ .Release.Name }} + {{- end }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ template "vcluster.clusterRoleName" . }} + apiGroup: rbac.authorization.k8s.io +{{- end }} \ No newline at end of file diff --git a/charts/k8s/templates/rbac/role.yaml b/charts/k8s/templates/rbac/role.yaml new file mode 100644 index 000000000..5afa1ed91 --- /dev/null +++ b/charts/k8s/templates/rbac/role.yaml @@ -0,0 +1,30 @@ +{{- if .Values.rbac.role.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + app: vcluster + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +rules: + - apiGroups: [""] + resources: ["configmaps", "secrets", "services", "pods", "pods/attach", "pods/portforward", "pods/exec", "endpoints", "persistentvolumeclaims"] + verbs: ["create", "delete", "patch", "update", "get", "list", "watch"] + - apiGroups: [""] + resources: ["events", "pods/log"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["create", "delete", "patch", "update", "get", "list", "watch"] + - apiGroups: ["apps"] + resources: ["statefulsets", "replicasets", "deployments"] + verbs: ["get", "list", "watch"] +{{- if .Values.openshift.enable }} + - apiGroups: [""] + resources: ["endpoints/restricted"] + verbs: ["create"] +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/k8s/templates/rbac/rolebinding.yaml b/charts/k8s/templates/rbac/rolebinding.yaml new file mode 100644 index 000000000..e751c0e41 --- /dev/null +++ 
b/charts/k8s/templates/rbac/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- if .Values.rbac.role.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + app: vcluster + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +subjects: + - kind: ServiceAccount + {{- if .Values.serviceAccount.name }} + name: {{ .Values.serviceAccount.name }} + {{- else }} + name: vc-{{ .Release.Name }} + {{- end }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: {{ .Release.Name }} + apiGroup: rbac.authorization.k8s.io +{{- end }} \ No newline at end of file diff --git a/charts/k8s/templates/serviceaccount.yaml b/charts/k8s/templates/serviceaccount.yaml new file mode 100644 index 000000000..14a522afe --- /dev/null +++ b/charts/k8s/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vc-{{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + app: vcluster + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.serviceAccount.imagePullSecrets }} +imagePullSecrets: +{{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/k8s/templates/syncer-deployment.yaml b/charts/k8s/templates/syncer-deployment.yaml new file mode 100644 index 000000000..90d8a06b6 --- /dev/null +++ b/charts/k8s/templates/syncer-deployment.yaml @@ -0,0 +1,119 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + app: vcluster + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.syncer.labels }} +{{ toYaml .Values.syncer.labels | indent 4 }} +{{- end }} 
+ {{- if .Values.syncer.annotations }} + annotations: +{{ toYaml .Values.syncer.annotations | indent 4 }} + {{- end }} +spec: + replicas: {{ .Values.syncer.replicas }} + strategy: + rollingUpdate: + maxSurge: 1 + {{- if (eq (int .Values.syncer.replicas) 1) }} + maxUnavailable: 0 + {{- else }} + maxUnavailable: 1 + {{- end }} + type: RollingUpdate + selector: + matchLabels: + app: vcluster + release: {{ .Release.Name }} + template: + metadata: + labels: + app: vcluster + release: {{ .Release.Name }} + spec: + terminationGracePeriodSeconds: 10 + nodeSelector: +{{ toYaml .Values.syncer.nodeSelector | indent 8 }} + affinity: +{{ toYaml .Values.syncer.affinity | indent 8 }} + tolerations: +{{ toYaml .Values.syncer.tolerations | indent 8 }} + {{- if .Values.serviceAccount.name }} + serviceAccountName: {{ .Values.serviceAccount.name }} + {{- else }} + serviceAccountName: vc-{{ .Release.Name }} + {{- end }} + volumes: + - name: certs + secret: + secretName: {{ .Release.Name }}-certs + {{- if .Values.syncer.volumes }} +{{ toYaml .Values.syncer.volumes | indent 8 }} + {{- end }} + containers: + - name: syncer + {{- if .Values.syncer.image }} + image: "{{ .Values.syncer.image }}" + {{- else }} + image: "loftsh/vcluster:{{ .Chart.Version }}" + {{- end }} + {{- if .Values.syncer.workingDir }} + workingDir: {{ .Values.syncer.workingDir }} + {{- end }} + {{- if .Values.syncer.command }} + command: + {{- range $f := .Values.syncer.command }} + - {{ $f | quote }} + {{- end }} + {{- end }} + {{- if not .Values.syncer.noArgs }} + args: + - --service-name={{ .Release.Name }} + - --suffix={{ .Release.Name }} + - --set-owner + {{- if .Values.ingress.enabled }} + - --tls-san={{ .Values.ingress.host }} + {{- end }} + {{- range $f := .Values.syncer.extraArgs }} + - {{ $f | quote }} + {{- end }} + {{- else }} + args: +{{ toYaml .Values.syncer.extraArgs | indent 10 }} + {{- end }} + {{- if .Values.syncer.livenessProbe }} + {{- if .Values.syncer.livenessProbe.enabled }} + livenessProbe: + 
httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + failureThreshold: 10 + initialDelaySeconds: 60 + periodSeconds: 2 + {{- end }} + {{- end }} + {{- if .Values.syncer.readinessProbe }} + {{- if .Values.syncer.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /readyz + port: 8443 + scheme: HTTPS + failureThreshold: 30 + periodSeconds: 2 + {{- end }} + {{- end }} + securityContext: +{{ toYaml .Values.syncer.securityContext | indent 10 }} + env: +{{ toYaml .Values.syncer.env | indent 10 }} + volumeMounts: +{{ toYaml .Values.syncer.volumeMounts | indent 10 }} + resources: +{{ toYaml .Values.syncer.resources | indent 10 }} diff --git a/charts/k8s/templates/syncer-service.yaml b/charts/k8s/templates/syncer-service.yaml new file mode 100644 index 000000000..d574bb686 --- /dev/null +++ b/charts/k8s/templates/syncer-service.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + app: vcluster + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: {{ .Values.service.type }} + ports: + - name: https + port: 443 + targetPort: 8443 + protocol: TCP + selector: + app: vcluster + release: {{ .Release.Name }} diff --git a/charts/k8s/values.yaml b/charts/k8s/values.yaml new file mode 100644 index 000000000..75ad1466c --- /dev/null +++ b/charts/k8s/values.yaml @@ -0,0 +1,161 @@ +# Make sure the service-cidr is the exact service cidr of the host cluster. +# If this does not match, you won't be able to create services within the vcluster. You can find out +# the service cidr of the host cluster by creating a service with a not allowed ClusterIP in the host cluster. +# This will yield an error message in the form of: +# The Service "faulty-service" is invalid: spec.clusterIP: Invalid value: "1.1.1.1": provided IP is not in the valid range. 
The range of valid IPs is 10.96.0.0/12 +serviceCIDR: "10.96.0.0/12" + +# Syncer configuration +syncer: + # Image to use for the syncer + # image: loftsh/vcluster + extraArgs: + - --request-header-ca-cert=/pki/ca.crt + - --client-ca-cert=/pki/ca.crt + - --server-ca-cert=/pki/ca.crt + - --server-ca-key=/pki/ca.key + - --kube-config=/pki/admin.conf + volumeMounts: + - mountPath: /pki + name: certs + readOnly: true + env: [] + livenessProbe: + enabled: true + readinessProbe: + enabled: true + resources: + limits: + memory: 1Gi + requests: + cpu: 100m + memory: 128Mi + # Extra volumes + volumes: [] + # The amount of replicas to run the deployment with + replicas: 1 + # NodeSelector used to schedule the syncer + nodeSelector: {} + # Affinity to apply to the syncer deployment + affinity: {} + # Tolerations to apply to the syncer deployment + tolerations: [] + # Extra Labels for the syncer deployment + labels: {} + # Extra Annotations for the syncer deployment + annotations: {} + +# Etcd settings +etcd: + image: k8s.gcr.io/etcd:3.4.13-0 + # The amount of replicas to run + replicas: 1 + # NodeSelector used + nodeSelector: {} + # Affinity to apply + affinity: {} + # Tolerations to apply + tolerations: [] + # Extra Labels + labels: {} + # Extra Annotations + annotations: {} + resources: + requests: + cpu: 100m + memory: 100Mi + # Storage settings for the etcd + storage: + # If this is disabled, vcluster will use an emptyDir instead + # of a PersistentVolumeClaim + persistence: true + # Size of the persistent volume claim + size: 5Gi + # Optional StorageClass used for the pvc + # if empty default StorageClass defined in your host cluster will be used + #className: + +# Kubernetes Controller Manager settings +controller: + image: k8s.gcr.io/kube-controller-manager:v1.21.5 + # The amount of replicas to run the deployment with + replicas: 1 + # NodeSelector used + nodeSelector: {} + # Affinity to apply + affinity: {} + # Tolerations to apply + tolerations: [] + # Extra Labels + 
labels: {} + # Extra Annotations + annotations: {} + resources: + requests: + cpu: 200m + +# Kubernetes API Server settings +api: + image: k8s.gcr.io/kube-apiserver:v1.21.5 + extraArgs: [] + # The amount of replicas to run the deployment with + replicas: 1 + # NodeSelector used to schedule the syncer + nodeSelector: {} + # Affinity to apply to the syncer deployment + affinity: {} + # Tolerations to apply to the syncer deployment + tolerations: [] + # Extra Labels for the syncer deployment + labels: {} + # Extra Annotations for the syncer deployment + annotations: {} + resources: + requests: + cpu: 200m + +# Service account that should be used by the vcluster +serviceAccount: + create: true + # Optional name of the service account to use + # name: default + +# Roles & ClusterRoles for the vcluster +rbac: + clusterRole: + # Enable this to let the vcluster sync + # real nodes, storage classes and priority classes + create: false + role: + # This is required for basic functionality of vcluster + create: true + +# Service configurations +service: + type: ClusterIP + +# PreInstall hook configuration +preInstall: + enabled: true + +# Configure the ingress resource that allows you to access the vcluster +ingress: + # Enable ingress record generation + enabled: false + # Ingress path type + pathType: ImplementationSpecific + apiVersion: networking.k8s.io/v1 + ingressClassName: "" + host: vcluster.local + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "true" + +# Set "enable" to true when running vcluster in an OpenShift host +# This will add an extra rule to the deployed role binding in order +# to manage service endpoints +openshift: + enable: false + \ No newline at end of file diff --git a/cmd/vcluster/cmd/certs.go b/cmd/vcluster/cmd/certs.go new file mode 100644 index 000000000..18c4278fa --- /dev/null +++ 
b/cmd/vcluster/cmd/certs.go @@ -0,0 +1,174 @@ +package cmd + +import ( + "context" + "github.com/loft-sh/vcluster/pkg/certs" + "github.com/loft-sh/vcluster/pkg/util/clienthelper" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "io/ioutil" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/klog" + "path/filepath" + ctrl "sigs.k8s.io/controller-runtime" + "strconv" + "strings" +) + +// CertsCmd holds the certs flags +type CertsCmd struct { + Prefix string + ServiceCIDR string + ClusterDomain string + ClusterName string + Namespace string + + CertificateDir string + EtcdReplicas int +} + +func NewCertsCommand() *cobra.Command { + options := &CertsCmd{} + cmd := &cobra.Command{ + Use: "certs", + Short: "Generates control plane certificates", + Args: cobra.NoArgs, + RunE: func(cobraCmd *cobra.Command, args []string) error { + return ExecuteCerts(options) + }, + } + + cmd.Flags().StringVar(&options.ClusterName, "cluster-name", "kubernetes", "The cluster name") + cmd.Flags().StringVar(&options.ClusterDomain, "cluster-domain", "cluster.local", "The cluster domain ending that should be used for the virtual cluster") + cmd.Flags().StringVar(&options.ServiceCIDR, "service-cidr", "10.96.0.0/12", "Service CIDR is the subnet used by k8s services") + cmd.Flags().StringVar(&options.Prefix, "prefix", "vcluster", "Release name and prefix for generating the assets") + cmd.Flags().StringVar(&options.Namespace, "namespace", "", "Namespace where to deploy the cert secret to") + cmd.Flags().StringVar(&options.CertificateDir, "certificate-dir", "certs", "The temporary directory where the certificates will be stored") + cmd.Flags().IntVar(&options.EtcdReplicas, "etcd-replicas", 1, "The etcd cluster size") + return cmd +} + +// write needed files to secret +var certMap = map[string]string{ + certs.AdminKubeConfigFileName: certs.AdminKubeConfigFileName, + certs.ControllerManagerKubeConfigFileName: 
certs.ControllerManagerKubeConfigFileName, + + certs.APIServerCertName: certs.APIServerCertName, + certs.APIServerKeyName: certs.APIServerKeyName, + + certs.APIServerEtcdClientCertName: certs.APIServerEtcdClientCertName, + certs.APIServerEtcdClientKeyName: certs.APIServerEtcdClientKeyName, + + certs.APIServerKubeletClientCertName: certs.APIServerKubeletClientCertName, + certs.APIServerKubeletClientKeyName: certs.APIServerKubeletClientKeyName, + + certs.CACertName: certs.CACertName, + certs.CAKeyName: certs.CAKeyName, + + certs.FrontProxyCACertName: certs.FrontProxyCACertName, + certs.FrontProxyCAKeyName: certs.FrontProxyCAKeyName, + + certs.FrontProxyClientCertName: certs.FrontProxyClientCertName, + certs.FrontProxyClientKeyName: certs.FrontProxyClientKeyName, + + certs.ServiceAccountPrivateKeyName: certs.ServiceAccountPrivateKeyName, + certs.ServiceAccountPublicKeyName: certs.ServiceAccountPublicKeyName, + + certs.EtcdCACertName: strings.Replace(certs.EtcdCACertName, "/", "-", -1), + certs.EtcdCAKeyName: strings.Replace(certs.EtcdCAKeyName, "/", "-", -1), + + certs.EtcdHealthcheckClientCertName: strings.Replace(certs.EtcdHealthcheckClientCertName, "/", "-", -1), + certs.EtcdHealthcheckClientKeyName: strings.Replace(certs.EtcdHealthcheckClientKeyName, "/", "-", -1), + + certs.EtcdPeerCertName: strings.Replace(certs.EtcdPeerCertName, "/", "-", -1), + certs.EtcdPeerKeyName: strings.Replace(certs.EtcdPeerKeyName, "/", "-", -1), + + certs.EtcdServerCertName: strings.Replace(certs.EtcdServerCertName, "/", "-", -1), + certs.EtcdServerKeyName: strings.Replace(certs.EtcdServerKeyName, "/", "-", -1), +} + +func ExecuteCerts(options *CertsCmd) error { + inClusterConfig := ctrl.GetConfigOrDie() + kubeClient, err := kubernetes.NewForConfig(inClusterConfig) + if err != nil { + return err + } + + // get current namespace + if options.Namespace == "" { + options.Namespace, err = clienthelper.CurrentNamespace() + if err != nil { + return err + } + } + + secretName := 
options.Prefix + "-certs" + _, err = kubeClient.CoreV1().Secrets(options.Namespace).Get(context.Background(), secretName, metav1.GetOptions{}) + if err == nil { + klog.Infof("Certs secret already exists, skip generation") + return nil + } + + cfg, err := certs.SetInitDynamicDefaults() + if err != nil { + return err + } + + // generate etcd server and peer sans + etcdService := options.Prefix + "-etcd" + serverSans := []string{etcdService, etcdService + "." + options.Namespace, etcdService + "." + options.Namespace + ".svc"} + for i := 0; i < options.EtcdReplicas; i++ { + hostname := etcdService + "-" + strconv.Itoa(i) + serverSans = append(serverSans, hostname, hostname+"."+etcdService, hostname+"."+etcdService+"."+options.Namespace) + } + + cfg.ClusterName = options.ClusterName + cfg.NodeRegistration.Name = options.Prefix + "-api" + cfg.Etcd.Local = &certs.LocalEtcd{ + ServerCertSANs: serverSans, + PeerCertSANs: serverSans, + } + cfg.Networking.ServiceSubnet = options.ServiceCIDR + cfg.Networking.DNSDomain = options.ClusterDomain + cfg.ControlPlaneEndpoint = options.Prefix + "-api" + cfg.CertificatesDir = options.CertificateDir + cfg.LocalAPIEndpoint.AdvertiseAddress = "0.0.0.0" + cfg.LocalAPIEndpoint.BindPort = 443 + err = certs.CreatePKIAssets(cfg) + if err != nil { + return errors.Wrap(err, "create pki assets") + } + + err = certs.CreateJoinControlPlaneKubeConfigFiles(cfg.CertificatesDir, cfg) + if err != nil { + return errors.Wrap(err, "create kube configs") + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: options.Namespace, + }, + Data: map[string][]byte{}, + } + + for fromName, toName := range certMap { + data, err := ioutil.ReadFile(filepath.Join(options.CertificateDir, fromName)) + if err != nil { + return errors.Wrap(err, "read "+fromName) + } + + secret.Data[toName] = data + } + + // finally create the secret + _, err = kubeClient.CoreV1().Secrets(options.Namespace).Create(context.Background(), secret, 
metav1.CreateOptions{}) + if err != nil { + return errors.Wrap(err, "create certs secret") + } + + klog.Infof("Successfully created certs secret %s/%s", options.Namespace, secretName) + return nil +} diff --git a/cmd/vcluster/cmd/root.go b/cmd/vcluster/cmd/root.go index 57f8db1bf..fe28c4501 100644 --- a/cmd/vcluster/cmd/root.go +++ b/cmd/vcluster/cmd/root.go @@ -21,5 +21,6 @@ func BuildRoot() *cobra.Command { // add top level commands rootCmd.AddCommand(NewStartCommand()) + rootCmd.AddCommand(NewCertsCommand()) return rootCmd } diff --git a/cmd/vclusterctl/cmd/app/create/values/default.go b/cmd/vclusterctl/cmd/app/create/values/default.go index d7ce3f22c..af3011677 100644 --- a/cmd/vclusterctl/cmd/app/create/values/default.go +++ b/cmd/vclusterctl/cmd/app/create/values/default.go @@ -9,7 +9,7 @@ import ( "strings" ) -var AllowedDistros = []string{"k3s", "k0s", "vanilla"} +var AllowedDistros = []string{"k3s", "k0s", "k8s"} func GetDefaultReleaseValues(client kubernetes.Interface, createOptions *create.CreateOptions, log log.Logger) (string, error) { if !contains(createOptions.Distro, AllowedDistros) { @@ -26,6 +26,8 @@ func GetDefaultReleaseValues(client kubernetes.Interface, createOptions *create. 
return getDefaultK3SReleaseValues(client, createOptions, log) } else if createOptions.Distro == "k0s" { return getDefaultK0SReleaseValues(client, createOptions, log) + } else if createOptions.Distro == "k8s" { + return "", nil } return "", errors.New("unrecognized distro " + createOptions.Distro) diff --git a/pkg/certs/constants.go b/pkg/certs/constants.go index 6b69677e7..8e043fc6d 100644 --- a/pkg/certs/constants.go +++ b/pkg/certs/constants.go @@ -19,10 +19,12 @@ import ( const ( // CertificateValidity defines the validity for all the signed certificates generated by kubeadm - CertificateValidity = time.Hour * 24 * 365 + CertificateValidity = time.Hour * 24 * 365 * 10 // CACertAndKeyBaseName defines certificate authority base name CACertAndKeyBaseName = "ca" + // CACertName defines certificate name + CACertName = "ca.crt" // CAKeyName defines certificate name CAKeyName = "ca.key" @@ -30,55 +32,84 @@ const ( APIServerCertAndKeyBaseName = "apiserver" // APIServerCertName defines API's server certificate name APIServerCertName = "apiserver.crt" + // APIServerKeyName defines API's server key name + APIServerKeyName = "apiserver.key" // APIServerCertCommonName defines API's server certificate common name (CN) APIServerCertCommonName = "kube-apiserver" // APIServerKubeletClientCertAndKeyBaseName defines kubelet client certificate and key base name APIServerKubeletClientCertAndKeyBaseName = "apiserver-kubelet-client" + // APIServerKubeletClientCertName defines kubelet client certificate name + APIServerKubeletClientCertName = "apiserver-kubelet-client.crt" + // APIServerKubeletClientKeyName defines kubelet client key name + APIServerKubeletClientKeyName = "apiserver-kubelet-client.key" // APIServerKubeletClientCertCommonName defines kubelet client certificate common name (CN) APIServerKubeletClientCertCommonName = "kube-apiserver-kubelet-client" // EtcdCACertAndKeyBaseName defines etcd's CA certificate and key base name EtcdCACertAndKeyBaseName = "etcd/ca" + // 
EtcdCACertName defines etcd's CA certificate name + EtcdCACertName = "etcd/ca.crt" + // EtcdCAKeyName defines etcd's CA key name + EtcdCAKeyName = "etcd/ca.key" // EtcdServerCertAndKeyBaseName defines etcd's server certificate and key base name EtcdServerCertAndKeyBaseName = "etcd/server" // EtcdServerCertName defines etcd's server certificate name EtcdServerCertName = "etcd/server.crt" + // EtcdServerKeyName defines etcd's server key name + EtcdServerKeyName = "etcd/server.key" // EtcdPeerCertAndKeyBaseName defines etcd's peer certificate and key base name EtcdPeerCertAndKeyBaseName = "etcd/peer" // EtcdPeerCertName defines etcd's peer certificate name EtcdPeerCertName = "etcd/peer.crt" + // EtcdPeerKeyName defines etcd's peer key name + EtcdPeerKeyName = "etcd/peer.key" // EtcdHealthcheckClientCertAndKeyBaseName defines etcd's healthcheck client certificate and key base name EtcdHealthcheckClientCertAndKeyBaseName = "etcd/healthcheck-client" + // EtcdHealthcheckClientCertName defines etcd's healthcheck client certificate name + EtcdHealthcheckClientCertName = "etcd/healthcheck-client.crt" + // EtcdHealthcheckClientKeyName defines etcd's healthcheck client key name + EtcdHealthcheckClientKeyName = "etcd/healthcheck-client.key" // EtcdHealthcheckClientCertCommonName defines etcd's healthcheck client certificate common name (CN) EtcdHealthcheckClientCertCommonName = "kube-etcd-healthcheck-client" // APIServerEtcdClientCertAndKeyBaseName defines apiserver's etcd client certificate and key base name APIServerEtcdClientCertAndKeyBaseName = "apiserver-etcd-client" + // APIServerEtcdClientCertName defines apiserver's etcd client certificate name + APIServerEtcdClientCertName = "apiserver-etcd-client.crt" + // APIServerEtcdClientKeyName defines apiserver's etcd client key name + APIServerEtcdClientKeyName = "apiserver-etcd-client.key" // APIServerEtcdClientCertCommonName defines apiserver's etcd client certificate common name (CN) APIServerEtcdClientCertCommonName = 
"kube-apiserver-etcd-client" // ServiceAccountKeyBaseName defines SA key base name ServiceAccountKeyBaseName = "sa" + // ServiceAccountPublicKeyName defines SA public key base name + ServiceAccountPublicKeyName = "sa.pub" // ServiceAccountPrivateKeyName defines SA private key base name ServiceAccountPrivateKeyName = "sa.key" // FrontProxyCACertAndKeyBaseName defines front proxy CA certificate and key base name FrontProxyCACertAndKeyBaseName = "front-proxy-ca" + // FrontProxyCACertName defines front proxy CA certificate name + FrontProxyCACertName = "front-proxy-ca.crt" + // FrontProxyCAKeyName defines front proxy CA key name + FrontProxyCAKeyName = "front-proxy-ca.key" + // FrontProxyClientCertAndKeyBaseName defines front proxy certificate and key base name FrontProxyClientCertAndKeyBaseName = "front-proxy-client" + // FrontProxyClientCertName defines front proxy certificate name + FrontProxyClientCertName = "front-proxy-client.crt" + // FrontProxyClientKeyName defines front proxy key name + FrontProxyClientKeyName = "front-proxy-client.key" // FrontProxyClientCertCommonName defines front proxy certificate common name FrontProxyClientCertCommonName = "front-proxy-client" //used as subject.commonname attribute (CN) // AdminKubeConfigFileName defines name for the kubeconfig aimed to be used by the superuser/admin of the cluster AdminKubeConfigFileName = "admin.conf" - - // KubeletKubeConfigFileName defines the file name for the kubeconfig that the control-plane kubelet will use for talking - // to the API server - KubeletKubeConfigFileName = "kubelet.conf" // ControllerManagerKubeConfigFileName defines the file name for the controller manager's kubeconfig file ControllerManagerKubeConfigFileName = "controller-manager.conf" // SchedulerKubeConfigFileName defines the file name for the scheduler's kubeconfig file @@ -93,10 +124,6 @@ const ( // SystemPrivilegedGroup defines the well-known group for the apiservers. This group is also superuser by default // (i.e. 
bound to the cluster-admin ClusterRole) SystemPrivilegedGroup = "system:masters" - // NodesGroup defines the well-known group for all nodes. - NodesGroup = "system:nodes" - // NodesUserPrefix defines the user name prefix as requested by the Node authorizer. - NodesUserPrefix = "system:node:" // DefaultAPIServerBindAddress is the default bind address for the API Server DefaultAPIServerBindAddress = "0.0.0.0" diff --git a/pkg/certs/kubeconfig.go b/pkg/certs/kubeconfig.go index 578c5f8e3..bd17070a4 100644 --- a/pkg/certs/kubeconfig.go +++ b/pkg/certs/kubeconfig.go @@ -272,13 +272,6 @@ func getKubeConfigSpecsBase(cfg *InitConfiguration) (map[string]*kubeConfigSpec, Organizations: []string{SystemPrivilegedGroup}, }, }, - KubeletKubeConfigFileName: { - APIServer: controlPlaneEndpoint, - ClientName: fmt.Sprintf("%s%s", NodesUserPrefix, cfg.NodeRegistration.Name), - ClientCertAuth: &clientCertAuth{ - Organizations: []string{NodesGroup}, - }, - }, ControllerManagerKubeConfigFileName: { APIServer: controlPlaneEndpoint, ClientName: ControllerManagerUser, From 1963f49a970d8042de1d253ffd70595ed298db1d Mon Sep 17 00:00:00 2001 From: fabiankramm Date: Thu, 25 Nov 2021 17:47:55 +0100 Subject: [PATCH 3/3] fix: k8s distro deployment improvements --- .dockerignore | 2 +- .github/workflows/release.yaml | 1 + Dockerfile | 12 +++---- charts/k8s/templates/etcd-service.yaml | 4 +++ .../templates/etcd-statefulset-service.yaml | 4 +++ charts/k8s/templates/etcd-statefulset.yaml | 34 +++++-------------- cmd/vcluster/cmd/certs.go | 2 +- devspace.yaml | 4 +-- devspace_start.sh | 2 +- 9 files changed, 25 insertions(+), 40 deletions(-) diff --git a/.dockerignore b/.dockerignore index e11069aae..0743d2943 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,7 +2,7 @@ /.devspace /.git /.vscode -/chart +/charts /api /kubeconfig.yaml /cmd/virtualclusterctl diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 0e6d6a588..6a25d439f 100644 --- 
a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -89,6 +89,7 @@ jobs: helm repo add chartmuseum $CHART_MUSEUM_URL --username $CHART_MUSEUM_USER --password $CHART_MUSEUM_PASSWORD helm cm-push --force --version="$RELEASE_VERSION" charts/k3s/ chartmuseum helm cm-push --force --version="$RELEASE_VERSION" charts/k0s/ chartmuseum + helm cm-push --force --version="$RELEASE_VERSION" charts/k8s/ chartmuseum env: CHART_MUSEUM_URL: "https://charts.loft.sh/" CHART_MUSEUM_USER: ${{ secrets.CHART_MUSEUM_USER }} diff --git a/Dockerfile b/Dockerfile index 73aeeae8e..fe8b8784b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # Build the manager binary -FROM golang:1.16 as builder +FROM golang:1.17 as builder -WORKDIR /vcluster +WORKDIR /vcluster-dev ARG TARGETOS ARG TARGETARCH @@ -31,16 +31,12 @@ ENV DEBUG true RUN mkdir -p /.cache /.config ENV GOCACHE=/.cache ENV GOENV=/.config -# Ensure the default group(0) owns all files and folders in /vcluster and /.cache -# to allow sync to /vcluster with devspace and allow go to write into build cache even when run as non-root -RUN chgrp -R 0 /vcluster /.cache /.config && \ - chmod -R g=u /vcluster /.cache /.config # Set home to "/" in order to for kubectl to automatically pick up vcluster kube config ENV HOME / # Build cmd -RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} GO111MODULE=on go build -mod vendor -o vcluster cmd/vcluster/main.go +RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} GO111MODULE=on go build -mod vendor -o /vcluster cmd/vcluster/main.go ENTRYPOINT ["go", "run", "-mod", "vendor", "cmd/vcluster/main.go"] @@ -50,7 +46,7 @@ FROM alpine # Set root path as working directory WORKDIR / -COPY --from=builder /vcluster/vcluster . +COPY --from=builder /vcluster . 
COPY manifests/ /manifests/ ENTRYPOINT ["/vcluster", "start"] diff --git a/charts/k8s/templates/etcd-service.yaml b/charts/k8s/templates/etcd-service.yaml index 6d8272f78..650487c6e 100644 --- a/charts/k8s/templates/etcd-service.yaml +++ b/charts/k8s/templates/etcd-service.yaml @@ -16,6 +16,10 @@ spec: port: 2379 targetPort: 2379 protocol: TCP + - name: peer + port: 2380 + targetPort: 2380 + protocol: TCP selector: app: vcluster-etcd release: {{ .Release.Name }} diff --git a/charts/k8s/templates/etcd-statefulset-service.yaml b/charts/k8s/templates/etcd-statefulset-service.yaml index 984e13bf3..568ed816d 100644 --- a/charts/k8s/templates/etcd-statefulset-service.yaml +++ b/charts/k8s/templates/etcd-statefulset-service.yaml @@ -15,6 +15,10 @@ spec: port: 2379 targetPort: 2379 protocol: TCP + - name: peer + port: 2380 + targetPort: 2380 + protocol: TCP clusterIP: None selector: app: vcluster-etcd diff --git a/charts/k8s/templates/etcd-statefulset.yaml b/charts/k8s/templates/etcd-statefulset.yaml index ca815a622..4086e5501 100644 --- a/charts/k8s/templates/etcd-statefulset.yaml +++ b/charts/k8s/templates/etcd-statefulset.yaml @@ -69,16 +69,18 @@ spec: image: "{{ .Values.etcd.image }}" command: - etcd - - '--advertise-client-urls=https://$(NAME).{{ .Release.Name }}-etcd.{{ .Release.Namespace }}:2379' - '--cert-file=/run/config/pki/etcd-server.crt' - '--client-cert-auth=true' - '--data-dir=/var/lib/etcd' - - '--initial-advertise-peer-urls=https://$(NAME).{{ .Release.Name }}-etcd.{{ .Release.Namespace }}:2380' - - '--initial-cluster=$(NAME)=https://$(NAME).{{ .Release.Name }}-etcd.{{ .Release.Namespace }}:2380' + - '--advertise-client-urls=https://$(NAME).{{ .Release.Name }}-etcd-headless.{{ .Release.Namespace }}:2379' + - '--initial-advertise-peer-urls=https://$(NAME).{{ .Release.Name }}-etcd-headless.{{ .Release.Namespace }}:2380' + - '--initial-cluster=$(NAME)=https://$(NAME).{{ .Release.Name }}-etcd-headless.{{ .Release.Namespace }}:2380' + - 
'--listen-client-urls=https://0.0.0.0:2379' + - '--listen-metrics-urls=http://0.0.0.0:2381' + - '--listen-peer-urls=https://0.0.0.0:2380' + - '--initial-cluster-state=new' + - '--initial-cluster-token={{ .Release.Name }}' - '--key-file=/run/config/pki/etcd-server.key' - - '--listen-client-urls=https://0.0.0.1:2379,https://$(NAME).{{ .Release.Name }}-etcd.{{ .Release.Namespace }}:2379' - - '--listen-metrics-urls=http://0.0.0.1:2381' - - '--listen-peer-urls=https://$(NAME).{{ .Release.Name }}-etcd.{{ .Release.Namespace }}:2380' - '--name=$(NAME)' - '--peer-cert-file=/run/config/pki/etcd-peer.crt' - '--peer-client-cert-auth=true' @@ -89,26 +91,6 @@ spec: {{- range $f := .Values.etcd.extraArgs }} - {{ $f | quote }} {{- end }} - livenessProbe: - httpGet: - path: /health - port: 2381 - scheme: HTTP - initialDelaySeconds: 10 - timeoutSeconds: 15 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 8 - startupProbe: - httpGet: - path: /health - port: 2381 - scheme: HTTP - initialDelaySeconds: 10 - timeoutSeconds: 15 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 24 securityContext: {{ toYaml .Values.etcd.securityContext | indent 10 }} env: diff --git a/cmd/vcluster/cmd/certs.go b/cmd/vcluster/cmd/certs.go index 18c4278fa..1113edc99 100644 --- a/cmd/vcluster/cmd/certs.go +++ b/cmd/vcluster/cmd/certs.go @@ -121,7 +121,7 @@ func ExecuteCerts(options *CertsCmd) error { serverSans := []string{etcdService, etcdService + "." + options.Namespace, etcdService + "." 
+ options.Namespace + ".svc"} for i := 0; i < options.EtcdReplicas; i++ { hostname := etcdService + "-" + strconv.Itoa(i) - serverSans = append(serverSans, hostname, hostname+"."+etcdService, hostname+"."+etcdService+"."+options.Namespace) + serverSans = append(serverSans, hostname, hostname+"."+etcdService+"-headless", hostname+"."+etcdService+"-headless"+"."+options.Namespace) } cfg.ClusterName = options.ClusterName diff --git a/devspace.yaml b/devspace.yaml index 7d977a8bd..fcdf37478 100644 --- a/devspace.yaml +++ b/devspace.yaml @@ -40,11 +40,9 @@ deployments: enabled: false image: ${SYNCER_IMAGE} noArgs: true - workingDir: /vcluster + workingDir: /vcluster-dev command: ["sleep"] extraArgs: ["99999999999"] - securityContext: - readOnlyRootFilesystem: false dev: terminal: imageSelector: ${SYNCER_IMAGE} diff --git a/devspace_start.sh b/devspace_start.sh index 57a3e7322..b26fe2619 100755 --- a/devspace_start.sh +++ b/devspace_start.sh @@ -4,7 +4,7 @@ set +e # Continue on errors COLOR_CYAN="\033[0;36m" COLOR_RESET="\033[0m" -RUN_CMD="go run -mod vendor cmd/vcluster/main.go" +RUN_CMD="go run -mod vendor cmd/vcluster/main.go start" DEBUG_CMD="dlv debug ./cmd/vcluster/main.go --listen=0.0.0.0:2345 --api-version=2 --output /tmp/__debug_bin --headless --build-flags=\"-mod=vendor\" -- --lease-duration=99999 --renew-deadline=99998" echo -e "${COLOR_CYAN}