diff --git a/_partials/_cluster_observability.mdx b/_partials/getting-started/_cluster_observability.mdx similarity index 100% rename from _partials/_cluster_observability.mdx rename to _partials/getting-started/_cluster_observability.mdx diff --git a/_partials/getting-started/_cluster_profile_import_aws.mdx b/_partials/getting-started/_cluster_profile_import_aws.mdx new file mode 100644 index 0000000000..73b80e5df3 --- /dev/null +++ b/_partials/getting-started/_cluster_profile_import_aws.mdx @@ -0,0 +1,109 @@ +--- +partial_category: getting-started +partial_name: import-hello-uni-aws +--- + +```json +{ + "metadata": { + "name": "aws-profile", + "description": "Cluster profile to deploy to AWS.", + "labels": {} + }, + "spec": { + "version": "1.0.0", + "template": { + "type": "cluster", + "cloudType": "aws", + "packs": [ + { + "name": "ubuntu-aws", + "type": "spectro", + "layer": "os", + "version": "22.04", + "tag": "22.04", + "values": "# Spectro Golden images includes most of the hardening as per CIS Ubuntu Linux 22.04 LTS Server L1 v1.0.0 standards\n\n# Uncomment below section to\n# 1. Include custom files to be copied over to the nodes and/or\n# 2. Execute list of commands before or after kubeadm init/join is executed\n#\n#kubeadmconfig:\n# preKubeadmCommands:\n# - echo \"Executing pre kube admin config commands\"\n# - update-ca-certificates\n# - 'systemctl restart containerd; sleep 3'\n# - 'while [ ! -S /var/run/containerd/containerd.sock ]; do echo \"Waiting for containerd...\"; sleep 1; done'\n# postKubeadmCommands:\n# - echo \"Executing post kube admin config commands\"\n# files:\n# - targetPath: /usr/local/share/ca-certificates/mycom.crt\n# targetOwner: \"root:root\"\n# targetPermissions: \"0644\"\n# content: |\n# -----BEGIN CERTIFICATE-----\n# MIICyzCCAbOgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl\n# cm5ldGVzMB4XDTIwMDkyMjIzNDMyM1oXDTMwMDkyMDIzNDgyM1owFTETMBEGA1UE\n# AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMdA\n# nZYs1el/6f9PgV/aO9mzy7MvqaZoFnqO7Qi4LZfYzixLYmMUzi+h8/RLPFIoYLiz\n# qiDn+P8c9I1uxB6UqGrBt7dkXfjrUZPs0JXEOX9U/6GFXL5C+n3AUlAxNCS5jobN\n# fbLt7DH3WoT6tLcQefTta2K+9S7zJKcIgLmBlPNDijwcQsbenSwDSlSLkGz8v6N2\n# 7SEYNCV542lbYwn42kbcEq2pzzAaCqa5uEPsR9y+uzUiJpv5tDHUdjbFT8tme3vL\n# 9EdCPODkqtMJtCvz0hqd5SxkfeC2L+ypaiHIxbwbWe7GtliROvz9bClIeGY7gFBK\n# jZqpLdbBVjo0NZBTJFUCAwEAAaMmMCQwDgYDVR0PAQH/BAQDAgKkMBIGA1UdEwEB\n# /wQIMAYBAf8CAQAwDQYJKoZIhvcNAQELBQADggEBADIKoE0P+aVJGV9LWGLiOhki\n# HFv/vPPAQ2MPk02rLjWzCaNrXD7aPPgT/1uDMYMHD36u8rYyf4qPtB8S5REWBM/Y\n# g8uhnpa/tGsaqO8LOFj6zsInKrsXSbE6YMY6+A8qvv5lPWpJfrcCVEo2zOj7WGoJ\n# ixi4B3fFNI+wih8/+p4xW+n3fvgqVYHJ3zo8aRLXbXwztp00lXurXUyR8EZxyR+6\n# b+IDLmHPEGsY9KOZ9VLLPcPhx5FR9njFyXvDKmjUMJJgUpRkmsuU1mCFC+OHhj56\n# IkLaSJf6z/p2a3YjTxvHNCqFMLbJ2FvJwYCRzsoT2wm2oulnUAMWPI10vdVM+Nc=\n# -----END CERTIFICATE-----", + "registry": { + "metadata": { + "uid": "5eecc89d0b150045ae661cef", + "name": "Public Repo", + "kind": "pack", + "isPrivate": false, + "providerType": "" + } + } + }, + { + "name": "kubernetes", + "type": "spectro", + "layer": "k8s", + "version": "1.27.15", + "tag": "1.27.x", + "values": "# spectrocloud.com/enabled-presets: Kube Controller Manager:loopback-ctrlmgr,Kube Scheduler:loopback-scheduler\npack:\n content:\n images:\n - image: registry.k8s.io/coredns/coredns:v1.10.1\n - image: registry.k8s.io/etcd:3.5.12-0\n - image: registry.k8s.io/kube-apiserver:v1.27.15\n - image: registry.k8s.io/kube-controller-manager:v1.27.15\n - image: registry.k8s.io/kube-proxy:v1.27.15\n - image: 
registry.k8s.io/kube-scheduler:v1.27.15\n - image: registry.k8s.io/pause:3.9\n - image: registry.k8s.io/pause:3.8\n #CIDR Range for Pods in cluster\n # Note : This must not overlap with any of the host or service network\n podCIDR: \"192.168.0.0/16\"\n #CIDR notation IP range from which to assign service cluster IPs\n # Note : This must not overlap with any IP ranges assigned to nodes for pods.\n serviceClusterIpRange: \"10.96.0.0/12\"\n # serviceDomain: \"cluster.local\"\n\nkubeadmconfig:\n apiServer:\n extraArgs:\n # Note : secure-port flag is used during kubeadm init. Do not change this flag on a running cluster\n secure-port: \"6443\"\n anonymous-auth: \"true\"\n profiling: \"false\"\n disable-admission-plugins: \"AlwaysAdmit\"\n default-not-ready-toleration-seconds: \"60\"\n default-unreachable-toleration-seconds: \"60\"\n enable-admission-plugins: \"AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity\"\n admission-control-config-file: \"/etc/kubernetes/pod-security-standard.yaml\"\n audit-log-path: /var/log/apiserver/audit.log\n audit-policy-file: /etc/kubernetes/audit-policy.yaml\n audit-log-maxage: \"30\"\n audit-log-maxbackup: \"10\"\n audit-log-maxsize: \"100\"\n authorization-mode: RBAC,Node\n tls-cipher-suites: \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\"\n extraVolumes:\n - name: audit-log\n hostPath: /var/log/apiserver\n mountPath: /var/log/apiserver\n pathType: DirectoryOrCreate\n - name: audit-policy\n hostPath: /etc/kubernetes/audit-policy.yaml\n mountPath: /etc/kubernetes/audit-policy.yaml\n readOnly: true\n pathType: File\n - name: pod-security-standard\n hostPath: /etc/kubernetes/pod-security-standard.yaml\n mountPath: /etc/kubernetes/pod-security-standard.yaml\n readOnly: true\n pathType: File\n controllerManager:\n extraArgs:\n profiling: \"false\"\n terminated-pod-gc-threshold: \"25\"\n use-service-account-credentials: \"true\"\n feature-gates: \"RotateKubeletServerCertificate=true\"\n scheduler:\n extraArgs:\n profiling: \"false\"\n kubeletExtraArgs:\n read-only-port : \"0\"\n event-qps: \"0\"\n feature-gates: \"RotateKubeletServerCertificate=true\"\n protect-kernel-defaults: \"true\"\n rotate-server-certificates: \"true\"\n tls-cipher-suites: \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\"\n files:\n - path: hardening/audit-policy.yaml\n targetPath: /etc/kubernetes/audit-policy.yaml\n targetOwner: \"root:root\"\n targetPermissions: \"0600\"\n - path: hardening/90-kubelet.conf\n targetPath: /etc/sysctl.d/90-kubelet.conf\n targetOwner: \"root:root\"\n targetPermissions: \"0600\"\n - targetPath: /etc/kubernetes/pod-security-standard.yaml\n targetOwner: \"root:root\"\n targetPermissions: \"0600\"\n content: |\n apiVersion: apiserver.config.k8s.io/v1\n kind: AdmissionConfiguration\n plugins:\n - name: PodSecurity\n configuration:\n apiVersion: pod-security.admission.config.k8s.io/v1\n kind: PodSecurityConfiguration\n defaults:\n enforce: \"baseline\"\n enforce-version: \"v1.27\"\n audit: \"baseline\"\n audit-version: \"v1.27\"\n warn: 
\"restricted\"\n warn-version: \"v1.27\"\n audit: \"restricted\"\n audit-version: \"v1.27\"\n exemptions:\n # Array of authenticated usernames to exempt.\n usernames: []\n # Array of runtime class names to exempt.\n runtimeClasses: []\n # Array of namespaces to exempt.\n namespaces: [kube-system]\n\n preKubeadmCommands:\n # For enabling 'protect-kernel-defaults' flag to kubelet, kernel parameters changes are required\n - 'echo \"====> Applying kernel parameters for Kubelet\"'\n - 'sysctl -p /etc/sysctl.d/90-kubelet.conf'\n postKubeadmCommands:\n - 'chmod 600 /var/lib/kubelet/config.yaml'\n #- 'echo \"List of post kubeadm commands to be executed\"'\n\n# Client configuration to add OIDC based authentication flags in kubeconfig\n#clientConfig:\n #oidc-issuer-url: \"{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}\"\n #oidc-client-id: \"{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}\"\n #oidc-client-secret: 1gsranjjmdgahm10j8r6m47ejokm9kafvcbhi3d48jlc3rfpprhv\n #oidc-extra-scope: profile,email", + "registry": { + "metadata": { + "uid": "5eecc89d0b150045ae661cef", + "name": "Public Repo", + "kind": "pack", + "isPrivate": false, + "providerType": "" + } + } + }, + { + "name": "cni-calico", + "type": "spectro", + "layer": "cni", + "version": "3.28.0", + "tag": "3.28.0", + "values": "# spectrocloud.com/enabled-presets: Microk8s:microk8s-false\npack:\n content:\n images:\n - image: gcr.io/spectro-images-public/packs/calico/3.28.0/cni:v3.28.0\n - image: gcr.io/spectro-images-public/packs/calico/3.28.0/node:v3.28.0\n - image: gcr.io/spectro-images-public/packs/calico/3.28.0/kube-controllers:v3.28.0\n\nmanifests:\n calico:\n microk8s: \"false\"\n images:\n cni: \"\"\n node: \"\"\n kubecontroller: \"\"\n # IPAM type to use. 
Supported types are calico-ipam, host-local\n ipamType: \"calico-ipam\"\n\n calico_ipam:\n assign_ipv4: true\n assign_ipv6: false\n\n # Should be one of CALICO_IPV4POOL_IPIP or CALICO_IPV4POOL_VXLAN \n encapsulationType: \"CALICO_IPV4POOL_IPIP\"\n\n # Should be one of Always, CrossSubnet, Never\n encapsulationMode: \"Always\"\n\n env:\n # Additional env variables for calico-node\n calicoNode:\n #IPV6: \"autodetect\"\n #FELIX_IPV6SUPPORT: \"true\"\n #CALICO_IPV6POOL_NAT_OUTGOING: \"true\"\n #CALICO_IPV4POOL_CIDR: \"192.168.0.0/16\"\n #IP_AUTODETECTION_METHOD: \"first-found\"\n\n # Additional env variables for calico-kube-controller deployment\n calicoKubeControllers:\n #LOG_LEVEL: \"info\"\n #SYNC_NODE_LABELS: \"true\"", + "registry": { + "metadata": { + "uid": "5eecc89d0b150045ae661cef", + "name": "Public Repo", + "kind": "pack", + "isPrivate": false, + "providerType": "" + } + } + }, + { + "name": "csi-aws-ebs", + "type": "spectro", + "layer": "csi", + "version": "1.30.0", + "tag": "1.30.0", + "values": "# spectrocloud.com/enabled-presets: Microk8s:microk8s-false\npack:\n content:\n images:\n - image: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/aws-ebs-csi-driver:v1.30.0\n - image: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/external-provisioner:v4.0.1-eks-1-30-2\n - image: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/external-attacher:v4.5.1-eks-1-30-2\n - image: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/external-resizer:v1.10.1-eks-1-30-2\n - image: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/livenessprobe:v2.12.0-eks-1-30-2\n - image: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/node-driver-registrar:v2.10.1-eks-1-30-2\n - image: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/external-snapshotter/csi-snapshotter:v7.0.2-eks-1-30-2\n - image: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/volume-modifier-for-k8s:v0.3.0\n charts:\n - repo: https://kubernetes-sigs.github.io/aws-ebs-csi-driver \n name: aws-ebs-csi-driver\n version: 2.30.0\n namespace: \"kube-system\"\n\ncharts:\n aws-ebs-csi-driver:\n storageClasses: \n # Default Storage Class\n - name: spectro-storage-class\n # annotation metadata\n annotations:\n storageclass.kubernetes.io/is-default-class: \"true\"\n # label metadata\n # labels:\n # my-label-is: supercool\n # defaults to WaitForFirstConsumer\n volumeBindingMode: WaitForFirstConsumer\n # defaults to Delete\n reclaimPolicy: Delete\n parameters:\n # File system type: xfs, ext2, ext3, ext4\n csi.storage.k8s.io/fstype: \"ext4\"\n # EBS volume type: io1, io2, gp2, gp3, sc1, st1, standard\n type: \"gp2\"\n # I/O operations per second per GiB. Required when io1 or io2 volume type is specified.\n # iopsPerGB: \"\"\n # Applicable only when io1 or io2 volume type is specified\n # allowAutoIOPSPerGBIncrease: false\n # I/O operations per second. Applicable only for gp3 volumes.\n # iops: \"\"\n # Throughput in MiB/s. Applicable only for gp3 volumes.\n # throughput: \"\"\n # Whether the volume should be encrypted or not\n # encrypted: \"\"\n # The full ARN of the key to use when encrypting the volume. 
When not specified, the default KMS key is used.\n # kmsKeyId: \"\"\n # Additional Storage Class \n # - name: addon-storage-class\n # annotations:\n # storageclass.kubernetes.io/is-default-class: \"false\"\n # labels:\n # my-label-is: supercool\n # volumeBindingMode: WaitForFirstConsumer\n # reclaimPolicy: Delete\n # parameters:\n # csi.storage.k8s.io/fstype: \"ext4\"\n # type: \"gp2\"\n # iopsPerGB: \"\"\n # allowAutoIOPSPerGBIncrease: false\n # iops: \"\"\n # throughput: \"\"\n # encrypted: \"\"\n # kmsKeyId: \"\"\n\n image:\n repository: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/aws-ebs-csi-driver\n # Overrides the image tag whose default is v{{ .Chart.AppVersion }}\n tag: \"v1.30.0\"\n pullPolicy: IfNotPresent\n \n # -- Custom labels to add into metadata\n customLabels:\n {}\n # k8s-app: aws-ebs-csi-driver\n \n sidecars:\n provisioner:\n env: []\n image:\n pullPolicy: IfNotPresent\n repository: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/external-provisioner\n tag: \"v4.0.1-eks-1-30-2\"\n logLevel: 2\n # Additional parameters provided by external-provisioner.\n additionalArgs: []\n # Grant additional permissions to external-provisioner\n additionalClusterRoleRules:\n resources: {}\n # Tune leader lease election for csi-provisioner.\n # Leader election is on by default.\n leaderElection:\n enabled: true\n # Optional values to tune lease behavior.\n # The arguments provided must be in an acceptable time.ParseDuration format.\n # Ref: https://pkg.go.dev/flag#Duration\n # leaseDuration: \"15s\"\n # renewDeadline: \"10s\"\n # retryPeriod: \"5s\"\n securityContext:\n readOnlyRootFilesystem: true\n allowPrivilegeEscalation: false\n attacher:\n env: []\n image:\n pullPolicy: IfNotPresent\n repository: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/external-attacher\n tag: \"v4.5.1-eks-1-30-2\"\n # Tune leader lease election for csi-attacher.\n # Leader election is on by default.\n leaderElection:\n enabled: true\n # Optional values to tune lease behavior.\n # The arguments provided must be in an acceptable time.ParseDuration format.\n # Ref: https://pkg.go.dev/flag#Duration\n # leaseDuration: \"15s\"\n # renewDeadline: \"10s\"\n # retryPeriod: \"5s\"\n logLevel: 2\n # Additional parameters provided by external-attacher.\n additionalArgs: []\n # Grant additional permissions to external-attacher\n additionalClusterRoleRules: []\n resources: {}\n securityContext:\n readOnlyRootFilesystem: true\n allowPrivilegeEscalation: false\n snapshotter:\n # Enables the snapshotter sidecar even if the snapshot CRDs are not installed\n forceEnable: false\n env: []\n image:\n pullPolicy: IfNotPresent\n repository: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/external-snapshotter/csi-snapshotter\n tag: \"v7.0.2-eks-1-30-2\"\n logLevel: 2\n # Additional parameters provided by csi-snapshotter.\n additionalArgs: []\n # Grant additional permissions to csi-snapshotter\n additionalClusterRoleRules: []\n resources: {}\n securityContext:\n readOnlyRootFilesystem: true\n allowPrivilegeEscalation: false\n livenessProbe:\n image:\n pullPolicy: IfNotPresent\n repository: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/livenessprobe\n tag: \"v2.12.0-eks-1-30-2\"\n # Additional parameters provided by livenessprobe.\n additionalArgs: []\n resources: {}\n securityContext:\n readOnlyRootFilesystem: true\n allowPrivilegeEscalation: false\n resizer:\n env: []\n image:\n pullPolicy: IfNotPresent\n repository: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/external-resizer\n tag: 
\"v1.10.1-eks-1-30-2\"\n # Tune leader lease election for csi-resizer.\n # Leader election is on by default.\n leaderElection:\n enabled: true\n # Optional values to tune lease behavior.\n # The arguments provided must be in an acceptable time.ParseDuration format.\n # Ref: https://pkg.go.dev/flag#Duration\n # leaseDuration: \"15s\"\n # renewDeadline: \"10s\"\n # retryPeriod: \"5s\"\n logLevel: 2\n # Additional parameters provided by external-resizer.\n additionalArgs: []\n # Grant additional permissions to external-resizer\n additionalClusterRoleRules: []\n resources: {}\n securityContext:\n readOnlyRootFilesystem: true\n allowPrivilegeEscalation: false\n nodeDriverRegistrar:\n env: []\n image:\n pullPolicy: IfNotPresent\n repository: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/node-driver-registrar\n tag: \"v2.10.1-eks-1-30-2\"\n logLevel: 2\n # Additional parameters provided by node-driver-registrar.\n additionalArgs: []\n resources: {}\n securityContext:\n readOnlyRootFilesystem: true\n allowPrivilegeEscalation: false\n livenessProbe:\n exec:\n command:\n - /csi-node-driver-registrar\n - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)\n - --mode=kubelet-registration-probe\n initialDelaySeconds: 30\n periodSeconds: 90\n timeoutSeconds: 15\n volumemodifier:\n env: []\n image:\n pullPolicy: IfNotPresent\n repository: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.30.0/volume-modifier-for-k8s\n tag: \"v0.3.0\"\n leaderElection:\n enabled: true\n # Optional values to tune lease behavior.\n # The arguments provided must be in an acceptable time.ParseDuration format.\n # Ref: https://pkg.go.dev/flag#Duration\n # leaseDuration: \"15s\"\n # renewDeadline: \"10s\"\n # retryPeriod: \"5s\"\n logLevel: 2\n # Additional parameters provided by volume-modifier-for-k8s.\n additionalArgs: []\n resources: {}\n securityContext:\n readOnlyRootFilesystem: true\n allowPrivilegeEscalation: false\n \n proxy:\n http_proxy:\n no_proxy:\n \n imagePullSecrets: []\n nameOverride:\n fullnameOverride:\n \n awsAccessSecret:\n name: aws-secret\n keyId: key_id\n accessKey: access_key\n \n controller:\n batching: true\n volumeModificationFeature:\n enabled: false\n # Additional parameters provided by aws-ebs-csi-driver controller.\n additionalArgs: []\n sdkDebugLog: false\n loggingFormat: text\n affinity:\n nodeAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 1\n preference:\n matchExpressions:\n - key: eks.amazonaws.com/compute-type\n operator: NotIn\n values:\n - fargate\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - podAffinityTerm:\n labelSelector:\n matchExpressions:\n - key: app\n operator: In\n values:\n - ebs-csi-controller\n topologyKey: kubernetes.io/hostname\n weight: 100\n # The default filesystem type of the volume to provision when fstype is unspecified in the StorageClass.\n # If the default is not set and fstype is unset in the StorageClass, then no fstype will be set\n defaultFsType: ext4\n env: []\n # Use envFrom to reference ConfigMaps and Secrets across all containers in the deployment\n envFrom: []\n # If set, add pv/pvc metadata to plugin create requests as parameters.\n extraCreateMetadata: true\n # Extra volume tags to attach to each dynamically provisioned volume.\n # ---\n # extraVolumeTags:\n # key1: value1\n # key2: value2\n extraVolumeTags: {}\n httpEndpoint:\n # (deprecated) The TCP network address where the prometheus metrics endpoint\n # will run (example: `:8080` which corresponds to port 8080 on local host).\n # The 
default is empty string, which means metrics endpoint is disabled.\n # ---\n enableMetrics: false\n serviceMonitor:\n # Enables the ServiceMonitor resource even if the prometheus-operator CRDs are not installed\n forceEnable: false\n # Additional labels for ServiceMonitor object\n labels:\n release: prometheus\n # If set to true, AWS API call metrics will be exported to the following\n # TCP endpoint: \"0.0.0.0:3301\"\n # ---\n # ID of the Kubernetes cluster used for tagging provisioned EBS volumes (optional).\n k8sTagClusterId:\n logLevel: 2\n userAgentExtra: \"helm\"\n nodeSelector: {}\n deploymentAnnotations: {}\n podAnnotations: {}\n podLabels: {}\n priorityClassName: system-cluster-critical\n # AWS region to use. If not specified then the region will be looked up via the AWS EC2 metadata\n # service.\n # ---\n # region: us-east-1\n region:\n replicaCount: 2\n revisionHistoryLimit: 10\n socketDirVolume:\n emptyDir: {}\n updateStrategy:\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n # type: RollingUpdate\n # rollingUpdate:\n # maxSurge: 0\n # maxUnavailable: 1\n resources:\n requests:\n cpu: 10m\n memory: 40Mi\n limits:\n cpu: 100m\n memory: 256Mi\n serviceAccount:\n # A service account will be created for you if set to true. Set to false if you want to use your own.\n create: true\n name: ebs-csi-controller-sa\n annotations: {}\n ## Enable if EKS IAM for SA is used\n # eks.amazonaws.com/role-arn: arn::iam:::role/ebs-csi-role\n automountServiceAccountToken: true\n tolerations:\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n tolerationSeconds: 300\n # TSCs without the label selector stanza\n #\n # Example:\n #\n # topologySpreadConstraints:\n # - maxSkew: 1\n # topologyKey: topology.kubernetes.io/zone\n # whenUnsatisfiable: ScheduleAnyway\n # - maxSkew: 1\n # topologyKey: kubernetes.io/hostname\n # whenUnsatisfiable: ScheduleAnyway\n topologySpreadConstraints: []\n # securityContext on the controller pod\n securityContext:\n runAsNonRoot: true\n runAsUser: 1000\n runAsGroup: 1000\n fsGroup: 1000\n # Add additional volume mounts on the controller with controller.volumes and controller.volumeMounts\n volumes: []\n # Add additional volumes to be mounted onto the controller:\n # - name: custom-dir\n # hostPath:\n # path: /path/to/dir\n # type: Directory\n volumeMounts: []\n # And add mount paths for those additional volumes:\n # - name: custom-dir\n # mountPath: /mount/path\n # ---\n # securityContext on the controller container (see sidecars for securityContext on sidecar containers)\n containerSecurityContext:\n readOnlyRootFilesystem: true\n allowPrivilegeEscalation: false\n initContainers: []\n # containers to be run before the controller's container starts.\n #\n # Example:\n #\n # - name: wait\n # image: busybox\n # command: [ 'sh', '-c', \"sleep 20\" ]\n # Enable opentelemetry tracing for the plugin running on the daemonset\n otelTracing: {}\n # otelServiceName: ebs-csi-controller\n # otelExporterEndpoint: \"http://localhost:4317\"\n \n node:\n env: []\n envFrom: []\n kubeletPath: /var/lib/kubelet\n loggingFormat: text\n logLevel: 2\n priorityClassName:\n additionalArgs: []\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: eks.amazonaws.com/compute-type\n operator: NotIn\n values:\n - fargate\n - key: node.kubernetes.io/instance-type\n operator: NotIn\n values:\n - a1.medium\n - a1.large\n - a1.xlarge\n - a1.2xlarge\n - a1.4xlarge\n nodeSelector: {}\n 
daemonSetAnnotations: {}\n podAnnotations: {}\n podLabels: {}\n tolerateAllTaints: true\n tolerations:\n - operator: Exists\n effect: NoExecute\n tolerationSeconds: 300\n resources:\n requests:\n cpu: 10m\n memory: 40Mi\n limits:\n cpu: 100m\n memory: 256Mi\n revisionHistoryLimit: 10\n probeDirVolume:\n emptyDir: {}\n serviceAccount:\n create: true\n name: ebs-csi-node-sa\n annotations: {}\n ## Enable if EKS IAM for SA is used\n # eks.amazonaws.com/role-arn: arn::iam:::role/ebs-csi-role\n automountServiceAccountToken: true\n # Enable the linux daemonset creation\n enableLinux: true\n enableWindows: false\n # The number of attachment slots to reserve for system use (and not to be used for CSI volumes)\n # When this parameter is not specified (or set to -1), the EBS CSI Driver will attempt to determine the number of reserved slots via heuristic\n # Cannot be specified at the same time as `node.volumeAttachLimit`\n reservedVolumeAttachments:\n # The \"maximum number of attachable volumes\" per node\n # Cannot be specified at the same time as `node.reservedVolumeAttachments`\n volumeAttachLimit:\n updateStrategy:\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: \"10%\"\n hostNetwork: false\n # securityContext on the node pod\n securityContext:\n # The node pod must be run as root to bind to the registration/driver sockets\n runAsNonRoot: false\n runAsUser: 0\n runAsGroup: 0\n fsGroup: 0\n # Add additional volume mounts on the node pods with node.volumes and node.volumeMounts\n volumes: []\n # Add additional volumes to be mounted onto the node pods:\n # - name: custom-dir\n # hostPath:\n # path: /path/to/dir\n # type: Directory\n volumeMounts: []\n # And add mount paths for those additional volumes:\n # - name: custom-dir\n # mountPath: /mount/path\n # ---\n # securityContext on the node container (see sidecars for securityContext on sidecar containers)\n containerSecurityContext:\n readOnlyRootFilesystem: true\n privileged: true\n # Enable opentelemetry tracing for the plugin running on the daemonset\n otelTracing: {}\n # otelServiceName: ebs-csi-node\n # otelExporterEndpoint: \"http://localhost:4317\"\n \n additionalDaemonSets:\n # Additional node DaemonSets, using the node config structure\n # See docs/additional-daemonsets.md for more information\n #\n # example:\n # nodeSelector:\n # node.kubernetes.io/instance-type: c5.large\n # volumeAttachLimit: 15\n \n # Enable compatibility for the A1 instance family via use of an AL2-based image in a separate DaemonSet\n # a1CompatibilityDaemonSet: true\n \n # storageClasses: []\n # Add StorageClass resources like:\n # - name: ebs-sc\n # # annotation metadata\n # annotations:\n # storageclass.kubernetes.io/is-default-class: \"true\"\n # # label metadata\n # labels:\n # my-label-is: supercool\n # # defaults to WaitForFirstConsumer\n # volumeBindingMode: WaitForFirstConsumer\n # # defaults to Delete\n # reclaimPolicy: Retain\n # parameters:\n # encrypted: \"true\"\n \n volumeSnapshotClasses: []\n # Add VolumeSnapshotClass resources like:\n # - name: ebs-vsc\n # # annotation metadata\n # annotations:\n # snapshot.storage.kubernetes.io/is-default-class: \"true\"\n # # label metadata\n # labels:\n # my-label-is: supercool\n # # deletionPolicy must be specified\n # deletionPolicy: Delete\n # parameters:\n \n # Use old CSIDriver without an fsGroupPolicy set\n # Intended for use with older clusters that cannot easily replace the CSIDriver object\n # This parameter should always be false for new installations\n useOldCSIDriver: false", + "registry": { 
+ "metadata": { + "uid": "5eecc89d0b150045ae661cef", + "name": "Public Repo", + "kind": "pack", + "isPrivate": false, + "providerType": "" + } + } + }, + { + "name": "hello-universe", + "type": "oci", + "layer": "addon", + "version": "1.1.3", + "tag": "1.1.3", + "values": "# spectrocloud.com/enabled-presets: Backend:disable-api\npack:\n content:\n images:\n - image: ghcr.io/spectrocloud/hello-universe:1.1.3\n spectrocloud.com/install-priority: 0\n\nmanifests:\n hello-universe:\n images:\n hellouniverse: ghcr.io/spectrocloud/hello-universe:1.1.3\n apiEnabled: false\n namespace: hello-universe\n port: 8080\n replicas: 1", + "registry": { + "metadata": { + "uid": "64eaff5630402973c4e1856a", + "name": "Palette Community Registry", + "kind": "oci", + "isPrivate": true, + "providerType": "pack" + } + } + } + ] + }, + "variables": [] + } +} +``` \ No newline at end of file diff --git a/docs/docs-content/getting-started/aws/aws.md b/docs/docs-content/getting-started/aws/aws.md index 131edf0171..82dc744d2a 100644 --- a/docs/docs-content/getting-started/aws/aws.md +++ b/docs/docs-content/getting-started/aws/aws.md @@ -48,5 +48,11 @@ your cluster is deployed, you can update it using cluster profile updates. buttonText: "Learn more", relativeURL: "./deploy-manage-k8s-cluster-tf", }, + { + title: "Scale, Upgrade, and Secure Clusters", + description: "Learn how to scale, upgrade, and secure Palette host clusters deployed to AWS.", + buttonText: "Learn more", + relativeURL: "./scale-secure-cluster", + }, ]} /> diff --git a/docs/docs-content/getting-started/aws/deploy-manage-k8s-cluster-tf.md b/docs/docs-content/getting-started/aws/deploy-manage-k8s-cluster-tf.md index ad927c9901..2772e3fd84 100644 --- a/docs/docs-content/getting-started/aws/deploy-manage-k8s-cluster-tf.md +++ b/docs/docs-content/getting-started/aws/deploy-manage-k8s-cluster-tf.md @@ -740,5 +740,5 @@ AWS cluster and then updated it to use a different version of a cluster profile. cluster profile roll backs. We encourage you to check out the -[Spectro Cloud Terraform](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest) provider page to -learn more about the Palette resources you can deploy using Terraform. +[Scale, Upgrade, and Secure Clusters](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest) tutorial +to learn how to perform common Day-2 operations on your deployed clusters. diff --git a/docs/docs-content/getting-started/aws/scale-secure-cluster.md b/docs/docs-content/getting-started/aws/scale-secure-cluster.md new file mode 100644 index 0000000000..b59cfb0737 --- /dev/null +++ b/docs/docs-content/getting-started/aws/scale-secure-cluster.md @@ -0,0 +1,516 @@ +--- +sidebar_label: "Scale, Upgrade, and Secure Clusters" +title: "Scale, Upgrade, and Secure Clusters" +description: "Learn how to scale, upgrade, and secure Palette host clusters deployed to AWS." +icon: "" +hide_table_of_contents: false +sidebar_position: 60 +tags: ["getting-started", "aws", "tutorial"] +--- + +Palette has in-built features to help with the automation of Day-2 operations. Upgrading and maintaining a deployed +cluster is typically complex because you need to consider any possible impact on service availability. Palette provides +out-of-the-box functionality for upgrades, observability, granular Role Based Access Control (RBAC), backup and security +scans. + +This tutorial will teach you how to use the Palette UI to perform scale and maintenance tasks on your clusters. 
You will +learn how to create Palette projects and teams, import a cluster profile, safely upgrade the Kubernetes version of a +deployed cluster and scale up your cluster nodes. + +## Prerequisites + +To complete this tutorial, follow the steps described in the [Set up Palette with AWS](./setup.md) guide to authenticate +Palette for use with your AWS cloud account. + +Additionally, you should install kubectl locally. Use the Kubernetes +[Install Tools](https://kubernetes.io/docs/tasks/tools/) page for further guidance. + +## Create Palette Projects + +Palette projects help you organize and manage cluster resources, providing logical groupings. They also allow you to +manage user access control through Role Based Access Control (RBAC). You can assign users and teams with specific roles +to specific projects. All resources created within a project are scoped to that project and only available to that +project, but a tenant can have multiple projects. + +Log in to [Palette](https://console.spectrocloud.com). + +Click on the **drop-down Menu** at the top of the page and switch to the **Tenant Admin** scope. Palette provides the +**Default** project out-of-the-box. + +![Image that shows how to select tenant admin scope](/getting-started/getting-started_scale-secure-cluster_switch-tenant-admin-scope.webp) + +Navigate to the left **Main Menu** and click on **Projects**. Click on the **Create Project** button. The **Create a new +project** dialog appears. + +Fill out the input fields with values from the table below to create a project. + +| Field | Description | Value | +| ----------- | ----------------------------------- | --------------------------------------------------------- | +| Name | The name of the project. | `Project-ScaleSecureTutorial` | +| Description | A brief description of the project. | Project for Scale, Upgrade, and Secure Clusters tutorial. | +| Tags | Add tags to the project. | `env:dev` | + +Click **Confirm** to create the project. Once Palette finishes creating the project, a new card appears on the +**Projects** page. + +Navigate to the left **Main Menu** and click on **Users & Teams**. + +Select the **Teams** tab. Then, click on **Create Team**. + +Fill in the **Team Name** with **scale-secure-tutorial-team**. Click on **Confirm**. + +Once Palette creates the team, select it from the **Teams** list. The **Team Details** pane opens. + +On the **Project Roles** tab, click on **New Project Role**. The list of project roles appears. + +Select the **Project-ScaleSecureTutorial** from the **Projects** drop-down. Then, select the **Cluster Profile Viewer** +and **Cluster Viewer** roles. Click on **Confirm**. + +![Image that shows how to select team roles](/getting-started/getting-started_scale-secure-cluster_select-team-roles.webp) + +Any users that you add to this team inherit the project roles assigned to it. Roles are the foundation of Palette's RBAC +enforcement. They allow a single user to have different types of access control based on the resource being accessed. In +this scenario, any user added to this team will have access to view any cluster profiles and clusters in the +**Project-ScaleSecureTutorial** project, but not modify them. Check out the +[Palette RBAC](../../user-management/palette-rbac/palette-rbac.md) section for more details. + +Navigate to the left **Main Menu** and click on **Projects**. + +Click on **Open project** on the **Project-ScaleSecureTutorial** card. 
+ +![Image that shows how to open the tutorial project](/getting-started/getting-started_scale-secure-cluster_open-tutorial-project.webp) + +Your scope changes from **Tenant Admin** to **Project-ScaleSecureTutorial**. All further resources you create will be +part of this project. + +## Import a Cluster Profile + +Palette provides three resource contexts. They help you customize your environment to your organizational needs, as well +as control the scope of your settings. + +| Context | Description | +| ------- | ---------------------------------------------------------------------------------------- | +| System | Resources are available at the system level and to all tenants in the system. | +| Tenant | Resources are available at the tenant level and to all projects belonging to the tenant. | +| Project | Resources are available within a project and not available to other projects. | + +All of the resources you have created as part of your Getting Started journey have used the **Project** context. They +are only visible in the **Default** project. Therefore, you will need to create a new cluster profile in +**Project-ScaleSecureTutorial**. + +Navigate to the left **Main Menu** and click on **Profiles**. Click on **Import Cluster Profile**. The **Import Cluster +Profile** pane opens. + +Paste the following in the text editor. Click on **Validate**. The **Select repositories** dialog appears. + + + +Click on **Confirm**. Then, click on **Confirm** on the **Import Cluster Profile** pane. Palette creates a new cluster +profile named **aws-profile**. + +On the **Profiles** list, select **Project** from the **Contexts** drop-down. Your newly created cluster profile +displays. The Palette UI confirms that the cluster profile was created in the scope of the +**Project-ScaleSecureTutorial**. + +![Image that shows the cluster profile ](/getting-started/aws/getting-started_scale-secure-cluster_cluster-profile-created.webp) + +Select the cluster profile to view its details. The cluster profile summary appears. + +This cluster profile deploys the [Hello Universe](https://github.com/spectrocloud/hello-universe) application using a +pack. Click on the **hellouniverse 1.1.3** layer. The pack manifest editor appears. + +Click on **Presets** on the right-hand side. You can learn more about the pack presets on the pack README, which is +available in the Palette UI. Select the **Enable Hello Universe API** preset. The pack manifest changes accordingly. + +![Screenshot of pack presets](/getting-started/aws/getting-started_scale-secure-cluster_pack-presets.webp) + +The pack requires two values to be replaced for the authorization token and for the database password when using this +preset. Replace these values with your own base64 encoded values. The +[_hello-universe_](https://github.com/spectrocloud/hello-universe?tab=readme-ov-file#single-load-balancer) repository +provides a token that you can use. + +Click on **Confirm Updates**. The manifest editor closes. Then, click on **Save Changes** to save your updates. + +## Deploy a Cluster + +Navigate to the left **Main Menu** and select **Clusters**. Click on **Create Cluster**. + +Palette will prompt you to select the type of cluster. Select **AWS IaaS** and click on **Start AWS IaaS +Configuration**. + +Continue with the rest of the cluster deployment flow using the cluster profile you created in the +[Import a Cluster Profile](#import-a-cluster-profile) section, named **aws-profile**. 
Refer to the +[Deploy a Cluster](./deploy-k8s-cluster.md#deploy-a-cluster) tutorial for additional guidance or if you need a refresher +of the Palette deployment flow. + +### Verify the Application + +Navigate to the left **Main Menu** and select **Clusters**. + +Select your cluster to view its **Overview** tab. + +When the application is deployed and ready for network traffic, Palette exposes the service URL in the **Services** +field. Click on the URL for port **:8080** to access the Hello Universe application. + +![Cluster details page with service URL highlighted](/getting-started/aws/getting-started_scale-secure-cluster_service_url.webp) + +## Upgrade Kubernetes Versions + +Regularly upgrading your Kubernetes version is an important part of maintaining a good security posture. New versions +may contain important patches to security vulnerabilities and bugs that could affect the integrity and availability of +your clusters. + +Palette supports three minor Kubernetes versions at any given time. We support the current release and the three +previous minor version releases, also known as N-3. For example, if the current release is 1.29, we support 1.28, 1.27, +and 1.26. + +:::warning + +Once you upgrade your cluster to a new Kubernetes version, you will not be able to downgrade. + +::: + +We recommend using cluster profile versions to safely upgrade any layer of your cluster profile and maintain the +security of your clusters. Expand the following section to learn how to create a new cluster profile version with a +Kubernetes upgrade. + +
+ +Upgrade Kubernetes using Cluster Profile Versions + +Navigate to the left **Main Menu** and click on **Profiles**. Select the cluster profile that you used to deploy your +cluster, named **aws-profile**. The cluster profile details page appears. + +Click on the version drop-down and select **Create new version**. The version creation dialog appears. + +Fill in **1.1.0** in the **Version** input field. Then, click on **Confirm**. The new cluster profile version is created +with the same layers as version **1.0.0**. + +Select the **kubernetes 1.27.x** layer of the profile. The pack manifest editor appears. + +Click on the **Pack Version** dropdown. All of the available versions of the **Palette eXtended Kubernetes** pack +appear. The cluster profile is configured to use the latest patch version of **Kubernetes 1.27**. + +![Cluster profile with all Kubernetes versions](/getting-started/aws/getting-started_scale-secure-cluster_kubernetes-versions.webp) + +The official guidelines for Kubernetes upgrades recommend upgrading one minor version at a time. For example, if you are +using Kubernetes version 1.26, you should upgrade to 1.27, before upgrading to version 1.28. You can learn more about +the official Kubernetes upgrade guidelines in the +[Version Skew Policy](https://kubernetes.io/releases/version-skew-policy/) page. + +Select **1.28.x** from the version dropdown. This selection follows the Kubernetes upgrade guidelines as the cluster +profile is using **1.27.x**. + +The manifest editor highlights the changes made by this upgrade. Once you have verified that the upgrade changes +versions as expected, click on **Confirm changes**. + +Click on **Confirm Updates**. Then, click on **Save Changes** to persist your updates. + +Navigate to the left **Main Menu** and select **Clusters**. Select your cluster to view its **Overview** tab. + +Select the **Profile** tab. Your cluster is currently using the **1.0.0** version of your cluster profile. + +Change the cluster profile version by selecting **1.1.0** from the version drop-down. Click on **Review & Save**. The +**Changes Summary** dialog appears. + +Click on **Review changes in Editor**. The **Review Update Changes** dialog displays the same Kubernetes version +upgrades as the cluster profile editor previously did. Click on **Update**. + +
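+
+Optionally, you can also follow the upgrade from the command line. The snippet below is a minimal sketch that assumes you
+have already downloaded the cluster [kubeconfig](../../clusters/cluster-management/kubeconfig.md) file and pointed your
+`KUBECONFIG` environment variable at it, as described in the [Scale a Cluster](#scale-a-cluster) section. It relies only
+on standard kubectl behavior.
+
+```shell
+# Watch the node list while the upgrade rolls out. Replacement nodes report the
+# upgraded Kubernetes version in the VERSION column once they join the cluster.
+kubectl get nodes --watch
+```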
+ +Upgrading the Kubernetes version of your cluster modifies an infrastructure layer. Therefore, Kubernetes needs to +replace its nodes. This is known as a repave. Check out the +[Node Pools](../../clusters/cluster-management/node-pool.md#repave-behavior-and-configuration) page to learn more about +the repave behavior and configuration. + +Click on the **Nodes** tab. You can follow along with the node upgrades on this screen. Palette replaces the nodes +configured with the old Kubernetes version with newly upgraded ones. This may lead to some application level outages, as +Kubernetes swaps the workloads to the upgraded nodes. + +![Node repaves in progress](/getting-started/aws/getting-started_scale-secure-cluster_node-repaves.webp) + +### Verify the Application + +The cluster update completes when the Palette UI marks the cluster profile layers as green and the cluster is in a +**Healthy** state. The cluster **Overview** page also displays the Kubernetes version as **1.28**. Click on the URL for +port **:8080** to access the application and verify that your upgraded cluster is functional. + +![Kubernetes upgrade applied](/getting-started/aws/getting-started_scale-secure-cluster_kubernetes-upgrade-applied.webp) + +## Scan Clusters + +Palette provides compliance, security, conformance, and Software Bill of Materials (SBOM) scans on tenant clusters. +These scans ensure cluster adherence to specific compliance and security standards, as well as detect potential +vulnerabilities. You can perform four types of scans on your cluster. + +| Scan | Description | +| --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Kubernetes Configuration Security | This scan examines the compliance of deployed security features against the CIS Kubernetes Benchmarks, which are consensus-driven security guidelines for Kubernetes. By default, the test set will execute based on the cluster Kubernetes version. | +| Kubernetes Penetration Testing | This scan evaluates Kubernetes-related open-ports for any configuration issues that can leave the tenant clusters exposed to attackers. It hunts for security issues in your clusters and increases visibility of the security controls in your Kubernetes environments. | +| Kubernetes Conformance Testing | This scan validates your Kubernetes configuration to ensure that it conforms to CNCF specifications. Palette leverages an open-source tool called [Sonobuoy](https://sonobuoy.io) to perform this scan. | +| Software Bill of Materials (SBOM) | This scan details the various third-party components and dependencies used by your workloads and helps to manage security and compliance risks associated with those components. | + +Navigate to the left **Main Menu** and select **Clusters**. Select your cluster to view its **Overview** tab. + +Select the **Scan** tab. The list of all the available cluster scans appears. Palette indicates that you have never +scanned your cluster. + +![Scans never performed on the cluster](/getting-started/aws/getting-started_scale-secure-cluster_never-scanned-cluster.webp) + +Click **Run Scan** on the **Kubernetes configuration security** and **Kubernetes penetration testing** scans. Palette +schedules and executes these scans on your cluster, which may take a few minutes. 
Once they complete, you can download +the report in PDF, CSV or view the results directly in the Palette UI. + +![Scans completed on the cluster](/getting-started/aws/getting-started_scale-secure-cluster_scans-completed.webp) + +Click on **Configure Scan** on the **Software Bill of Materials (SBOM)** scan. The **Configure SBOM Scan** dialog +appears. + +Leave the default selections on this screen and click on **Confirm**. Optionally, you can configure an S3 bucket to save +your report into. Refer to the +[Configure an SBOM Scan](../../clusters/cluster-management/compliance-scan.md#configure-an-sbom-scan) guide to learn +more about the configuration options of this scan. + +Once the scan completes, click on the report to view it within the Palette UI. The third-party dependencies that your +workloads rely on are evaluated for potential security vulnerabilities. Reviewing the SBOM enables organizations to +track vulnerabilities, perform regular software maintenance, and ensure compliance with regulatory requirements. + +:::info + +The scan reports highlight any failed checks, based on Kubernetes community standards and CNCF requirements. We +recommend that you prioritize the rectification of any identified issues. + +::: + +As you have seen so far, Palette scans are crucial when maintaining your security posture. Palette provides the ability +to schedule your scans and periodically evaluate your clusters. In addition, it keeps a history of previous scans for +comparison purposes. Expand the following section to learn how to configure scan schedules for your cluster. + +
+ +Configure Cluster Scan Schedules + +Click on **Settings**. Then, select **Cluster Settings**. The **Settings** pane appears. + +Select the **Schedule Scans** option. You can configure schedules for your cluster scans. Palette provides common scan +schedules or you can provide a custom time. We recommend choosing a schedule when you expect the usage of your cluster +to be lowest. Otherwise, the scans may impact the performance of your nodes. + +![Scan schedules](/getting-started/aws/getting-started_scale-secure-cluster_scans-schedules.webp) + +Palette will automatically scan your cluster according to your configured schedule. + +
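+If you select the custom option, the schedule is typically expressed as a cron string. The following is a minimal sketch,
+assuming standard five-field cron syntax, that describes a scan every Saturday at 3:00 AM, outside of typical business
+hours.
+
+```text
+# minute  hour  day-of-month  month  day-of-week (6 = Saturday)
+0 3 * * 6
+```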
+ +## Scale a Cluster + +A node pool is a group of nodes within a cluster that all have the same configuration. You can use node pools for +different workloads. For example, you can create a node pool for your production workloads and another for your +development workloads. You can update node pools for active clusters or create a new one for the cluster. + +Navigate to the left **Main Menu** and select **Clusters**. Select your cluster to view its **Overview** tab. + +Select the **Nodes** tab. Your cluster has a **control-plane-pool** and a **worker-pool**. Each pool contains one node. + +Select the **Overview** tab. Download the [kubeconfig](../../clusters/cluster-management/kubeconfig.md) file. + +![kubeconfig download](/getting-started/aws/getting-started_scale-secure-cluster_download-kubeconfig.webp) + +Open a terminal window and set the environment variable `KUBECONFIG` to point to the file you downloaded. + +```shell +export KUBECONFIG=~/Downloads/admin.aws-cluster.kubeconfig +``` + +Execute the following command in your terminal to view the nodes of your cluster. + +```shell +kubectl get nodes +``` + +The output reveals two nodes, one for the worker pool and one for the control plane. Make a note of the name of your +worker node, which is the node that does not have the `control-plane` role. In the example below, +`ip-10-0-1-133.ec2.internal` is the name of the worker node. + +```shell +NAME STATUS ROLES AGE VERSION +ip-10-0-1-133.ec2.internal Ready 46m v1.28.11 +ip-10-0-1-95.ec2.internal Ready control-plane 51m v1.28.11 +``` + +The Hello Universe pack deploys three pods in the `hello-universe` namespace. Execute the following command to verify +where these pods have been scheduled. + +```shell +kubectl get pods --namespace hello-universe --output wide +``` + +The output verifies that all of the pods have been scheduled on the worker node you made a note of previously. + +```shell +NAME READY STATUS RESTARTS AGE NODE +api-7db799cf85-5w5l6 1/1 Running 1 (20m ago) 20m ip-10-0-1-133.ec2.internal +postgres-698d7ff8f4-vbktf 1/1 Running 0 20m ip-10-0-1-133.ec2.internal +ui-5f777c76df-pplcv 1/1 Running 0 20m ip-10-0-1-133.ec2.internal +``` + +Navigate back to the Palette UI in your browser. Select the **Nodes** tab. + +Click on **New Node Pool**. The **Add node pool** dialog appears. This workflow allows you to create a new worker pool +for your cluster. Fill in the following configuration. + +| Field | Value | Description | +| --------------------- | ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Node pool name** | `worker-pool-2` | The name of your worker pool. | +| **Enable Autoscaler** | Enabled | Whether Palette should scale the pool horizontally based on its per-node workload counts. The **Minimum size** parameter specifies the lower bound of nodes in the pool and the **Maximum size** specifies the upper bound. By default, **Minimum size** is `1` and **Maximum size** is `3`. | +| **Instance Type** | `m4.2xlarge` | Set the compute size equal to the already provisioned nodes. | +| **Availability Zone** | _Availability zone of your choice_ | Set the availability zone the same as the already provisioned nodes. | + +Click on **Confirm**. The dialog closes. Palette begins provisioning your node pool. 
Once the process completes, your +three node pools appear in a healthy state. + +![New worker pool provisioned](/getting-started/aws/getting-started_scale-secure-cluster_third-node-pool.webp) + +Navigate back to your terminal and execute the following command in your terminal to view the nodes of your cluster. + +```shell +kubectl get nodes +``` + +The output reveals three nodes, two for worker pools and one for the control plane. Make a note of the names of your +worker nodes. In the example below, `ip-10-0-1-133.ec2.internal` and `ip-10-0-1-32.ec2.internal` are the worker nodes. + +```shell +NAME STATUS ROLES AGE VERSION +ip-10-0-1-32.ec2.internal Ready 16m v1.28.11 +ip-10-0-1-133.ec2.internal Ready 46m v1.28.11 +ip-10-0-1-95.ec2.internal Ready control-plane 51m v1.28.11 +``` + +It is common to dedicate node pools to a particular type of workload. One way to specify this is through the use of +Kubernetes taints and tolerations. + +Taints provide nodes with the ability to repel a set of pods, allowing you to mark nodes as unavailable for certain +pods. Tolerations are applied to pods and allow the pods to schedule onto nodes with matching taints. Once configured, +nodes do not accept any pods that do not tolerate the taints. + +The animation below provides a visual representation of how taints and tolerations can be used to specify which +workloads execute on which nodes. + +![Taints repel pods to a new node](/getting-started/getting-started_scale-secure-cluster_taints-in-action.gif) + +Switch back to Palette in your web browser. Navigate to the left **Main Menu** and select **Profiles**. Select the +cluster profile deployed to your cluster, named `aws-profile`. Ensure that the **1.1.0** version is selected. + +Click on the **hellouniverse 1.1.3** layer. The manifest editor appears. Set the +`manifests.hello-universe.ui.useTolerations` field on line 20 to `true`. Then, set the +`manifests.hello-universe.ui.effect` field on line 22 to `NoExecute`. This toleration describes that the UI pods of +Hello Universe will tolerate the taint with the key `app`, value `ui` and effect `NoExecute`. The tolerations of the UI +pods should be as below. + +```yaml +ui: + useTolerations: true + tolerations: + effect: NoExecute + key: app + value: ui +``` + +Click on **Confirm Updates**. The manifest editor closes. Then, click on **Save Changes** to persist your changes. + +Navigate to the left **Main Menu** and select **Clusters**. Select your deployed cluster, named **aws-cluster**. + +Due to the changes you have made to the cluster profile, this cluster has a pending update. Click on **Updates**. The +**Changes Summary** dialog appears. + +Click on **Review Changes in Editor**. The **Review Update Changes** dialog appears. The toleration changes appear as +incoming configuration. + +Click on **Apply Changes** to apply the update to your cluster. + +Select the **Nodes** tab. Click on **Edit** on the first worker pool, named **worker-pool**. The **Edit node pool** +dialog appears. + +Click on **Add New Taint** in the **Taints** section. Fill in `app` for the **Key**, `ui` for the **Value** and select +`NoExecute` for the **Effect**. These values match the toleration you specified in your cluster profile earlier. + +![Add taint to worker pool](/getting-started/aws/getting-started_scale-secure-cluster_add-taint.webp) + +Click on **Confirm** to save your changes. The nodes in the `worker-pool` can now only execute the UI pods that have a +toleration matching the configured taint. 
+ +Switch back to your terminal. Execute the following command again to verify where the Hello Universe pods have been +scheduled. + +```shell +kubectl get pods --namespace hello-universe --output wide +``` + +The output verifies that the UI pods have remained scheduled on their original node named `ip-10-0-1-133.ec2.internal`, +while the other two pods have been moved to the node of the second worker pool named `ip-10-0-1-32.ec2.internal`. + +```shell +NAME READY STATUS RESTARTS AGE NODE +api-7db799cf85-5w5l6 1/1 Running 1 (20m ago) 20m ip-10-0-1-32.ec2.internal +postgres-698d7ff8f4-vbktf 1/1 Running 0 20m ip-10-0-1-32.ec2.internal +ui-5f777c76df-pplcv 1/1 Running 0 20m ip-10-0-1-133.ec2.internal +``` + +Taints and tolerations are a common way of creating nodes dedicated to certain workloads, once the cluster has scaled +accordingly through its provisioned node pools. Refer to the +[Taints and Tolerations](../../clusters/cluster-management/taints.md) guide to learn more. + +### Verify the Application + +Select the **Overview** tab. Click on the URL for port **:8080** to access the Hello Universe application and verify +that the application is functioning correctly. + +## Cleanup + +Use the following steps to remove all the resources you created for the tutorial. + +To remove the cluster, navigate to the left **Main Menu** and click on **Clusters**. Select the cluster you want to +delete to access its details page. + +Click on **Settings** to expand the menu, and select **Delete Cluster**. + +![Delete cluster](/getting-started/aws/getting-started_scale-secure-cluster_delete-cluster-button.webp) + +You will be prompted to type in the cluster name to confirm the delete action. Type in the cluster name `aws-cluster` to +proceed with the delete step. The deletion process takes several minutes to complete. + +:::info + +If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for a force delete. To trigger a force +delete, navigate to the cluster’s details page, click on **Settings**, then select **Force Delete Cluster**. Palette +automatically removes clusters stuck in the cluster deletion phase for over 24 hours. + +::: + +Once the cluster is deleted, navigate to the left **Main Menu** and click on **Profiles**. Find the cluster profile you +created and click on the **three-dot Menu** to display the **Delete** button. Select **Delete** and confirm the +selection to remove the cluster profile. + +Click on the **drop-down Menu** at the top of the page and switch to **Tenant Admin** scope. + +Navigate to the left **Main Menu** and click on **Projects**. + +Click on the **three-dot Menu** of the **Project-ScaleSecureTutorial** and select **Delete**. A pop-up box will ask you +to confirm the action. Confirm the deletion. + +Navigate to the left **Main Menu** and click on **Users & Teams**. Select the **Teams** tab. + +Click on **scale-secure-tutorial-team** list entry. The **Team Details** pane appears. Click on **Delete Team**. A +pop-up box will ask you to confirm the action. Confirm the deletion. + +## Wrap-up + +In this tutorial, you learned how to perform very important operations relating to the scalability and availability of +your clusters. First, you created a project and team. Next, you imported a cluster profile and deployed a host AWS +cluster. Then, you upgraded the Kubernetes version of your cluster and scanned your clusters using Palette's scanning +capabilities. 
Finally, you scaled your cluster's nodes and used taints to select which Hello Universe pods execute on +them. + +We encourage you to check out the [Additional Capabilities](../additional-capabilities.md) to explore other Palette +functionalities. diff --git a/docs/docs-content/getting-started/azure/create-cluster-profile.md b/docs/docs-content/getting-started/azure/create-cluster-profile.md index 977fdd0db1..0a39e728e4 100644 --- a/docs/docs-content/getting-started/azure/create-cluster-profile.md +++ b/docs/docs-content/getting-started/azure/create-cluster-profile.md @@ -12,7 +12,7 @@ Palette offers profile-based management for Kubernetes, enabling consistency, re across multiple clusters. A cluster profile allows you to customize the cluster infrastructure stack, allowing you to choose the desired Operating System (OS), Kubernetes, Container Network Interfaces (CNI), Container Storage Interfaces (CSI). You can further customize the stack with add-on application layers. For more information about cluster profile -types, refer to [Cluster Profiles]((../introduction.md#cluster-profiles). +types, refer to [Cluster Profiles](../introduction.md#cluster-profiles). In this tutorial, you create a full profile directly from the Palette dashboard. Then, you add a layer to your cluster profile by using a [community pack](../../integrations/community_packs.md) to deploy a web application. diff --git a/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_add-taint.webp b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_add-taint.webp new file mode 100644 index 0000000000..a158847423 Binary files /dev/null and b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_add-taint.webp differ diff --git a/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_cluster-profile-created.webp b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_cluster-profile-created.webp new file mode 100644 index 0000000000..4c4ae7c386 Binary files /dev/null and b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_cluster-profile-created.webp differ diff --git a/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_delete-cluster-button.webp b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_delete-cluster-button.webp new file mode 100644 index 0000000000..9df2588d06 Binary files /dev/null and b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_delete-cluster-button.webp differ diff --git a/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_download-kubeconfig.webp b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_download-kubeconfig.webp new file mode 100644 index 0000000000..3facd33b30 Binary files /dev/null and b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_download-kubeconfig.webp differ diff --git a/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_kubernetes-upgrade-applied.webp b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_kubernetes-upgrade-applied.webp new file mode 100644 index 0000000000..a21617c726 Binary files /dev/null and b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_kubernetes-upgrade-applied.webp differ diff --git 
a/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_kubernetes-versions.webp b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_kubernetes-versions.webp new file mode 100644 index 0000000000..403dcce3f6 Binary files /dev/null and b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_kubernetes-versions.webp differ diff --git a/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_never-scanned-cluster.webp b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_never-scanned-cluster.webp new file mode 100644 index 0000000000..918bd55322 Binary files /dev/null and b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_never-scanned-cluster.webp differ diff --git a/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_node-repaves.webp b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_node-repaves.webp new file mode 100644 index 0000000000..c8c0f1624d Binary files /dev/null and b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_node-repaves.webp differ diff --git a/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_pack-presets.webp b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_pack-presets.webp new file mode 100644 index 0000000000..3fcaf0336e Binary files /dev/null and b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_pack-presets.webp differ diff --git a/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_scans-completed.webp b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_scans-completed.webp new file mode 100644 index 0000000000..42894b789c Binary files /dev/null and b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_scans-completed.webp differ diff --git a/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_scans-schedules.webp b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_scans-schedules.webp new file mode 100644 index 0000000000..4c203c9228 Binary files /dev/null and b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_scans-schedules.webp differ diff --git a/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_service_url.webp b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_service_url.webp new file mode 100644 index 0000000000..6df61924f6 Binary files /dev/null and b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_service_url.webp differ diff --git a/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_third-node-pool.webp b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_third-node-pool.webp new file mode 100644 index 0000000000..9596d7c334 Binary files /dev/null and b/static/assets/docs/images/getting-started/aws/getting-started_scale-secure-cluster_third-node-pool.webp differ diff --git a/static/assets/docs/images/getting-started/getting-started_scale-secure-cluster_open-tutorial-project.webp b/static/assets/docs/images/getting-started/getting-started_scale-secure-cluster_open-tutorial-project.webp new file mode 100644 index 
0000000000..09c28320a5 Binary files /dev/null and b/static/assets/docs/images/getting-started/getting-started_scale-secure-cluster_open-tutorial-project.webp differ diff --git a/static/assets/docs/images/getting-started/getting-started_scale-secure-cluster_select-team-roles.webp b/static/assets/docs/images/getting-started/getting-started_scale-secure-cluster_select-team-roles.webp new file mode 100644 index 0000000000..a9b14100cc Binary files /dev/null and b/static/assets/docs/images/getting-started/getting-started_scale-secure-cluster_select-team-roles.webp differ diff --git a/static/assets/docs/images/getting-started/getting-started_scale-secure-cluster_switch-tenant-admin-scope.webp b/static/assets/docs/images/getting-started/getting-started_scale-secure-cluster_switch-tenant-admin-scope.webp new file mode 100644 index 0000000000..37b4a5bc8f Binary files /dev/null and b/static/assets/docs/images/getting-started/getting-started_scale-secure-cluster_switch-tenant-admin-scope.webp differ diff --git a/static/assets/docs/images/getting-started/getting-started_scale-secure-cluster_taints-in-action.gif b/static/assets/docs/images/getting-started/getting-started_scale-secure-cluster_taints-in-action.gif new file mode 100644 index 0000000000..f1ea6f4431 Binary files /dev/null and b/static/assets/docs/images/getting-started/getting-started_scale-secure-cluster_taints-in-action.gif differ