From 72e8202959eaece39af7370680412f3f08e9e3cc Mon Sep 17 00:00:00 2001 From: Luciano Date: Tue, 23 Mar 2021 15:35:05 -0300 Subject: [PATCH 1/5] add draft readme for k8s workload registrar tutorial Signed-off-by: Luciano --- k8s/k8s-workload-registrar/README.md | 474 +++++++++++++++++++++++++++ 1 file changed, 474 insertions(+) create mode 100644 k8s/k8s-workload-registrar/README.md diff --git a/k8s/k8s-workload-registrar/README.md b/k8s/k8s-workload-registrar/README.md new file mode 100644 index 0000000..24a44c0 --- /dev/null +++ b/k8s/k8s-workload-registrar/README.md @@ -0,0 +1,474 @@ +# Configure SPIRE to use the Kubernetes Workload Registrar +This tutorial builds on the [Kubernetes Quickstart Tutorial](../quickstart/) and provides an example of how to configure SPIRE to use the Kubernetes Workload Registrar as a container within the SPIRE Server pod. With this tool, automatic workload registration and management is added to SPIRE. The changes required to deploy the registrar and the necessary files are shown as a delta to the quickstart tutorial, so it is highly encouraged to execute, or at least read through, the Kubernetes Quickstart Tutorial first. + +In this document you will learn how to: +* Deploy the K8s Workload Registrar as a container within the SPIRE Server Pod +* Configure the 3 available modes and their differences +* Use the 3 available workload registration modes +* Test successful registration entries creation + +# Prerequisites +Before proceeding, review the following list: +* You'll need access to the Kubernetes environment configured when going through the [Kubernetes Quickstart Tutorial](../quickstart/). +* Required configuration files for this tutorial can be found in the `k8s/k8s-workload-registrar` directory in https://github.com/spiffe/spire-tutorials. If you didn't already clone the repo for the _Kubernetes Quickstart Tutorial_, please do so now. +* The steps in this document should work with Kubernetes version 1.20.2. + +We will deploy an scenario that consists of a statefulset containing a SPIRE Server and the Kubernetes Workload Registrar, a SPIRE Agent, and a workload, and configure the different modes to illustrate the automatic registration entries creation. + +# Common configuration + +The SPIRE Server and the Kubernetes Workload registrar will communicate each other using a socket, that will be mounted at the `/tmp/spire-server/private` directory, as we can see from the `volumeMounts` section of both containers. The only difference between these sections is that, for the registrar, the socket will have the `readOnly` option set to `false`, while for the SPIRE Server container it will have its value set to `true`. Below, this section is shown for the registrar container. + +``` +- name: spire-server-socket + mountPath: /tmp/spire-server/private + readOnly: true +``` + +# Webhook mode (default) + +This mode makes use of the `ValidatingWebhookConfiguration` feature from Kubernetes, which is called by the Kubernetes API server everytime a new pod is created or deleted in the cluster, as we can see from the rules of the resource below: + +``` +ApiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: k8s-workload-registrar-webhook +webhooks: + - name: k8s-workload-registrar.spire.svc + clientConfig: + service: + name: k8s-workload-registrar + namespace: spire + path: "/validate" + caBundle: ... 
+ admissionReviewVersions: + - v1beta1 + rules: + - apiGroups: [""] + apiVersions: ["v1"] + operations: ["CREATE", "DELETE"] + resources: ["pods"] + scope: "Namespaced" +``` + +This webhook itself authenticates the API server, and for this reason we provide a CA bundle, with the `caBundle` option, as we can see in the stanza above (value ommited for brevity). This authentication must be done to ensure that it is the API server who is contacting the webhook, because this situation will lead to registration entries creation or deletion on the SPIRE Server, something that is a key point in the SPIRE infrastructure. + +Also, a secret is volume mounted in the `/run/spire/k8s-workload-registrar/secret` directory inside the SPIRE Server container, containing the K8S Workload Registrar server key. We can see this in the `volumeMounts` section of the SPIRE Server statefulset configuration file: + +``` +- name: k8s-workload-registrar-secret + mountPath: /run/spire/k8s-workload-registrar/secret + readOnly: true +``` + +The secret itself is named `k8s-workload-registrar-secret` and is shown below: + +``` +apiVersion: v1 +kind: Secret +metadata: + name: k8s-workload-registrar-secret + namespace: spire +type: Opaque +data: + server-key.pem: ... +``` + +Again, the value of the key is ommited. + +Another configuration that is relevant in this mode is the registrar certificates `ConfigMap`, that contains the K8S Workload Registrar server certificate and CA bundle used to verify the client certificate presented by the API server. This is mounted in the `/run/spire/k8s-workload-registrar/certs` directory. We can also check this by seeing the `volumeMounts` section of the SPIRE Server statefulset configuration file, which is shown below: + +``` +- name: k8s-workload-registrar-certs + mountPath: /run/spire/k8s-workload-registrar/certs + readOnly: true +``` + +The certificates for the CA and for the server are stored in a `ConfigMap`: + +``` +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8s-workload-registrar-certs + namespace: spire +data: + server-cert.pem: | + -----BEGIN CERTIFICATE----- + + ... + + -----END CERTIFICATE----- + + cacert.pem: | + -----BEGIN CERTIFICATE----- + + ... + + -----END CERTIFICATE----- +``` + +With all of this set, we can look at at the registrar's container configuration: + +``` +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8s-workload-registrar + namespace: spire +data: + k8s-workload-registrar.conf: | + cert_path = "/run/spire/k8s-workload-registrar/certs/server-cert.pem" + key_path = "/run/spire/k8s-workload-registrar/secret/server-key.pem" + cacert_path = "/run/spire/k8s-workload-registrar/certs/cacert.pem" + trust_domain = "example.org" + cluster = "k8stest" + server_socket_path = "/tmp/spire-server/private/api.sock" + insecure_skip_client_verification = false +``` + +As we can see, the `key_path` points to where the secret containing the server key is mounted, which was shown earlier. The `cert_path` and `cacert_path` points to the directory where the `ConfigMap` with the PEM encoded certificates for the server and for the CA are mounted. When the webhook is triggered, the registrar acts as the server and validates the identity of the client, which is the Kubernetes API server in this case. We can disable this authentication by setting the ```insecure_skip_client_verification``` option to `true` (though it is not recommended). 
+ +For the authentication, a `KubeConfig` file with the client certificate and key the API server should use to authenticate with the registrar is mounted inside the filesystem of the Kubernetes node. This file is shown below: + +``` +apiVersion: v1 +kind: Config +users: +- name: k8s-workload-registrar.spire.svc + user: + client-certificate-data: ... + client-key-data: ... +``` + +To be mounted, an `AdmissionConfiguration` describes where the API server can locate the file containing the `KubeConfig` entry. This file is passed to the API server via the `--extra-config=apiserver.admission-control-config-file` flag. + +``` +apiVersion: apiserver.k8s.io/v1alpha1 +kind: AdmissionConfiguration +plugins: +- name: ValidatingAdmissionWebhook + configuration: + apiVersion: apiserver.config.k8s.io/v1alpha1 + kind: WebhookAdmission + kubeConfigFile: /etc/kubernetes/pki/admctrl/kubeconfig.yaml +``` + +We have looked at the key points of the webhook mode's configuration, so let's apply the necessary files to set our scenario with a SPIRE Server with the registrar container in it, an Agent, and a workload, by issuing the following command: + +```console +$ insert command to deploy the scenario for the webhook mode +``` + +This is all we need to have the registration entries created on the server. We will start a shell into the SPIRE Server container and run the entry show directive by executing the command below: + +```console +$ insert command to see registration entries +``` + +You should see the following 3 registration entries, corresponding to the node, the agent, and the workload. + +***insert reg entries*** + +Let's see how are they built: + +The cluster name is used as Parent ID, and there is no reference to the node that the pod belongs to, this is, all the registration entries are mapped to a single node entry inside the cluster. This represents a drawback for this mode, as all the nodes in the cluster have permission to get identities for all the workloads that belong to the Kubernetes cluster, which increases the blast ratio in case of a node being compromised, among other disadvantages. + +Taking a look on the assigned SPIFFE IDs for the agent and the workload, we can see that they have the following form: +*spiffe://\/ns/\/sa/\*. +From this, we can conclude that we are using the registrar configured with the Service Account Based workload registration (which is the default behaviour). For instance, as the workload uses the *default* service account, into the *spire* namespace, its SPIFFE ID is: *spiffe://example.org/ns/spire/sa/default* + +Another thing that is worth looking, is the registrar log, in which we will found out if the entries were created by this container. Run the following command to get the logs of the registrar, and to look for the *Created pod entry* keyword. + +***insert command to get the logs and grep over the desired keyword*** + +The result should be similar to the one shown below: + +***insert output of the command above*** + +We can check that the 3 entries that were present on the SPIRE Server were created by the registrar, and correspond to the node, agent, and workload, in that specific order. + +## Pod deletion + +Let's see how the registrar handles a pod deletion, and which impact does it have on the registration entries. 
Run the following command to delete the workload deployment: + +```console +$ insert command to delete workload pod +``` +Again, check for the registration entries with the command below: + +```console +$ insert command to see registration entries +``` + +The output of the command will not include the registration entry that correspond to the workload, because the pod was deleted, and should be similar to this: + +***insert reg entries*** + +As the pod was deleted, we will check the registrar logs, looking for the "Deleted pod entry" keyword, with the command shown below: + +```console +$ insert command to get the logs and grep over the desired keyword +``` + +The output should be similar to: + +***insert output of the command above*** + +From which we can conclude that the registrar successfuly deleted the corresponding entry of the recently deleted pod. + +# Reconcile mode + +This mode, as opposed to Webhook mode, does not use a validating webhook but two reconciling controllers instead: one for the nodes and one for the pods. For this reason, we will not deploy all the configuration needed to perform the Kubernetes API server authentication, as the secret and `KubeConfig` entry, for instance, situation that makes the configuration simpler. + +We will jump directly into the registrar's container configuration, which is shown below: + +``` +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8s-workload-registrar + namespace: spire +data: + k8s-workload-registrar.conf: | + mode = "reconcile" + trust_domain = "example.org" + cluster = "k8stest" + server_socket_path = "/tmp/spire-server/private/api.sock" + metrics_addr = "0" + pod_label = "spire-workload" +``` + +We are explicitly indicating that *reconcile* mode is used. For the sake of the tutorial, we will be using Label Based workload registration for this mode (as we can see from the `pod_label` configurable), though every workload registration mode can be used with every registrar mode. This is all the configuration that is needed to have the containers working properly. + +We will deploy the same scenario as the previous mode, with the difference on the agent and workload pods: they will be labeled with the *spire-workload* label, that corresponds to the value indicated in the `pod_label` option of the `ConfigMap` shown above. Run the following command to set the scenario: + +```console +$ insert command to deploy the scenario for the reconcile mode +``` + +With the Reconcile scenario set, we will check the registration entries and some special considerations for this mode. Let's issue the command below to start a shell into the SPIRE Server container, and to show the existing registration entries. + +```console +$ insert command to see registration entries +``` + +Your output should similar to the following, and shows the entries for the node, the agent and the workload: + +***insert reg entries*** + +If we compare this entries to the Webhook mode ones, the difference is that the Parent ID of the SVID contains a reference to the node name where the pod is scheduled on. We mentioned that this is not happening using the Webhook node, and this was one of its principal drawbacks. Also, for the node registration entry (the one that has the SPIRE Server SPIFFE ID as the Parent ID), node name is used in the selectors, along with the cluster name. For the remaining two entries, pod name and namespace are used in the selectors instead. 
+ +As we are using Label workload registration mode, the SPIFFE ID's for the agent and the workload (which are labeled as we mentioned before) have the form: *spiffe://\/\*. For example, as the agent has the label value equal to `agent`, it has the following SPIFFE ID: *spiffe://example.org/agent*. + +Let's check if the registrar indeed created the registration entries, by checking its logs, and looking for the *Created new spire entry* keyword. Run the command that is shown below: + +```console +$ insert command to see the registrar logs +``` + +The output will be similar to this: + +***insert output of show logs command*** + +We mentioned before that there were two reconciling controllers, and we are seeing now that the node controller created the entry for the single node in the cluster, and that the pod controller created the entries for the two labeled pods: agent and workload. + +## Pod deletion + +The Kubernetes Workload Registrar automatically handles the creation and deletion of registration entries. We already see how the entries are created, and now we will test its deletion. Let's delete the workload deployment: + +```console +$ insert command to delete workload deployment +``` + +We will check if its corresponding entry is deleted too. Run the following command to see the registration entries on the SPIRE Server: + +```console +$ insert command to see registration entries +``` + +The output will only show two registration entries, because the workload entry was deleted by the registrar: + +***insert reg entries*** + +If we look for the *Deleted entry* keyword on the registrar logs, we will find out that the registrar deleted the entry. Issue the following command: + +```console +$ insert command to see registrar logs +``` + +The output should be similar to: + +***insert output of above command*** + +The registrar successfuly deleted the entry. + +## Non-labeled pods + +As we are using Label Based workload registration, only pods that have the label *spire-workload* will have its registration entry automatically created. Let's deploy a pod that has no label with the command below: + +```console +$ insert command to deploy a non-labeled workload +``` + +Let's see the existing registration entries with the command: + +```console +$ insert command to see registration entries +``` + +It's output should be similar to: + +***insert reg entries*** + +We see that the entries are the same as before, and that no entry has been created for the new workload. This is the expected behaviour, as only labeled pods will be considered by the workload registrar while using the Label Workload registration mode. + +# CRD mode + +This mode takes advantage of the `CustomResourceDefinition` feature from Kubernetes, which allows SPIRE to integrate with this tool and its control plane. A SPIFFE ID is defined as a custom resource, with an structure that matches the form of a registration entry. Below is a reduced example of the definition of a SPIFFE ID CRD. + +``` +apiVersion: spiffeid.spiffe.io/v1beta1 +kind: SpiffeID +metadata: + name: my-test-spiffeid + namespace: default +spec: + parentId: spiffe://example.org/spire/server + selector: + namespace: default + podName: my-test-pod + spiffeId: spiffe://example.org/test +``` + +The main goal of the custom resource is to track the intent of what and how the registration entries should look on the SPIRE Server, and to track any modification of these registration entries, reconciling its existence. 
This means that every SPIFFE ID CRD will have a matching registration entry, whose existence will be closely linked. Every modification done to the registration entry will have an impact on its corresponding SPIFFE ID CRD, and viceversa. + +The `ConfigMap` for the registrar below shows that we will be using the *crd* mode, and that Annotation Based workload registration is used along with it. The annotation that the registrar will look for is *spiffe.io/spiffe-id*. + +``` +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8s-workload-registrar + namespace: spire +data: + k8s-workload-registrar.conf: | + trust_domain = "example.org" + server_socket_path = "/tmp/spire-server/private/api.sock" + cluster = "k8stest" + mode = "crd" + pod_annotation = "spiffe.io/spiffe-id" +``` + +Let's deploy the necessary files, including the base scenario plus the SPIFFE ID CRD definition, and examine the automatically created registration entries. + +```console +$ insert command to deploy the crd mode scenario +``` + +Start a shell into the SPIRE Server and run the entry show command by executing: + +```console +$ insert command to see registration entries +``` + +The output should show the following registration entries: + +***insert reg entries*** + +3 entries were created corresponding to the node, agent, and workload. For the node entry (the one that has the SPIRE Server SPIFFE ID as Parent ID), we see a difference in the selectors, comparing it with the selectors in the node entry created using Reconcile mode: we find out that instead of placing the node name, CRD mode stores the UID of the node where the agent is running on. As the node name is used in the SPIFFE ID assigned to the node, we can take this as a mapping from node UID to node name. + +Something similar happens with the pod entries, but this time the pod UID where the workload is running is stored in the selectors, instead of the node UID. + +If we now focus our attention on the SPIFFE IDs assigned to the workloads, we see that it takes the form of *spiffe://\/\*. By using Annotation Based workload registration, it is possible to freely set the SPIFFE ID path. In this case, for the workload, we set the annotation value to *example-workload*. + +## Pod deletion + +As in the previous modes, if we delete the workload deployment, we will see that its corresponding registration entry will be deleted too. Let's run the command to delete the workload pod: + +```console +$ insert command to delete workload deployment +``` + +And now, check the registration entries in the SPIRE Server by executing: + +```console +insert command to see registration entries +``` + +The output should look like: + +***insert registration entries*** + +The only entries that should exist now are the ones that match the node and the SPIRE agent, because the workload one was deleted by the registrar. + +## Non-annotated pods + +Let's check if a pod that has no annotations its considered by the registrar. Deploy a new workload with this condition with the following command: + +```console +$ insert command to deploy a workload with no annotation +``` + +As in the previous section, let's see the registration entries that are present in the SPIRE Server: + +```console +insert command to see registration entries +``` + +The result of the command should be equal to the one shown in *Pod deletion* section, because no new entry has been created, as expected. 
+ +## SPIFFE ID CRD creation + +One of the benefits of using CRD Mode is that we can manipulate the SPIFFE IDs as if they were resources inside Kubernetes environment, in other words using the *kubectl* command. + +If we check for SPIFFE IDs resources (using *kubectl get spiffeids -n spire*), we'll obtain something like the following: + +***insert the spiffeids resources*** + +From this we can see that there are 2 already created custom resources, corresponding to the 3 entries that we saw above, minus the one for the workload, whose pod was deleted in the *Pod deletion* section. + +Let's create a new SPIFFE ID CRD by using: + +```console +# insert command to create a spiffe id crd +``` + +We will check if it was created, executing the *kubectl get spiffeids -n spire* command, whose output will show 3 custom resources: + +***insert output of spiffeids resources after applying the spiffeid*** + +The resource was succesfully created, but had it any impact on the SPIRE Server? Let's execute the command below to see the registration entries: + +```console +$ insert command to see registration entries +``` + +You'll get an output similar to this: + +***insert reg entries*** + +As we can see, a SPIFFE ID CRD creation triggers a registration entry creation on the SPIRE Server too. + +## SPIFFE ID CRD deletion + +The lifecycle of a SPIFFE ID CRD can be managed by Kubernetes, and has a direct impact on the corresponding registration entry stored in the SPIRE Server. We already see how a SPIFFE ID CRD creation activates a registration entry one. We will prove that the same applies for a CRD deletion. + +Let's delete the previously created SPIFFE ID CRD, and later check for the registration entries on the server. Run the following command to delete the CRD: + +```console +$ insert command to delete a spiffe id crd +``` + +Now, we will check the registration entries: + +```console +$ insert command to see reg entries on spire server +``` + +The output from this command should look like: + +***insert reg entries*** + +As we can see, the corresponding registration entry was deleted on the SPIRE Server too. 
From d7e9a62647ea109bf7a6a8375ece265968bdef2f Mon Sep 17 00:00:00 2001 From: Luciano Date: Wed, 14 Apr 2021 15:41:12 -0300 Subject: [PATCH 2/5] checkpoint Signed-off-by: Luciano --- k8s/k8s-workload-registrar/README.md | 354 +++++++++++++----- k8s/k8s-workload-registrar/create-cluster.sh | 3 + k8s/k8s-workload-registrar/kind-config.yaml | 19 + .../k8s-workload-registrar-cluster-role.yaml | 31 ++ .../k8s/k8s-workload-registrar-configmap.yaml | 27 ++ .../k8s-workload-registrar-statefulset.yaml | 107 ++++++ .../mode-crd/k8s/namespace.yaml | 4 + .../mode-crd/k8s/not-annotated-workload.yaml | 35 ++ .../k8s/spiffeid.spiffe.io_spiffeids.yaml | 106 ++++++ .../mode-crd/k8s/spire-agent.yaml | 166 ++++++++ .../mode-crd/k8s/spire-server.yaml | 205 ++++++++++ .../mode-crd/k8s/test_spiffeid.yaml | 11 + .../mode-crd/k8s/workload.yaml | 37 ++ .../mode-crd/scripts/deploy-scenario.sh | 14 + .../mode-reconcile/k8s/namespace.yaml | 4 + .../k8s/not-labeled-workload.yaml | 35 ++ .../mode-reconcile/k8s/spire-agent.yaml | 169 +++++++++ .../mode-reconcile/k8s/spire-server.yaml | 292 +++++++++++++++ .../mode-reconcile/k8s/workload.yaml | 36 ++ .../mode-reconcile/scripts/deploy-scenario.sh | 9 + .../k8s/admctrl/admission-control.yaml | 8 + .../mode-webhook/k8s/admctrl/kubeconfig.yaml | 9 + .../k8s/k8s-workload-registrar-secret.yaml | 9 + .../mode-webhook/k8s/namespace.yaml | 4 + .../mode-webhook/k8s/spire-agent.yaml | 171 +++++++++ .../mode-webhook/k8s/spire-server.yaml | 334 +++++++++++++++++ .../mode-webhook/k8s/validation-webhook.yaml | 23 ++ .../mode-webhook/k8s/workload.yaml | 35 ++ .../mode-webhook/scripts/deploy-scenario.sh | 11 + 29 files changed, 2181 insertions(+), 87 deletions(-) create mode 100755 k8s/k8s-workload-registrar/create-cluster.sh create mode 100644 k8s/k8s-workload-registrar/kind-config.yaml create mode 100644 k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-cluster-role.yaml create mode 100644 k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-configmap.yaml create mode 100644 k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-statefulset.yaml create mode 100644 k8s/k8s-workload-registrar/mode-crd/k8s/namespace.yaml create mode 100644 k8s/k8s-workload-registrar/mode-crd/k8s/not-annotated-workload.yaml create mode 100644 k8s/k8s-workload-registrar/mode-crd/k8s/spiffeid.spiffe.io_spiffeids.yaml create mode 100644 k8s/k8s-workload-registrar/mode-crd/k8s/spire-agent.yaml create mode 100644 k8s/k8s-workload-registrar/mode-crd/k8s/spire-server.yaml create mode 100644 k8s/k8s-workload-registrar/mode-crd/k8s/test_spiffeid.yaml create mode 100644 k8s/k8s-workload-registrar/mode-crd/k8s/workload.yaml create mode 100644 k8s/k8s-workload-registrar/mode-crd/scripts/deploy-scenario.sh create mode 100644 k8s/k8s-workload-registrar/mode-reconcile/k8s/namespace.yaml create mode 100644 k8s/k8s-workload-registrar/mode-reconcile/k8s/not-labeled-workload.yaml create mode 100644 k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-agent.yaml create mode 100644 k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-server.yaml create mode 100644 k8s/k8s-workload-registrar/mode-reconcile/k8s/workload.yaml create mode 100755 k8s/k8s-workload-registrar/mode-reconcile/scripts/deploy-scenario.sh create mode 100644 k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/admission-control.yaml create mode 100644 k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/kubeconfig.yaml create mode 100644 k8s/k8s-workload-registrar/mode-webhook/k8s/k8s-workload-registrar-secret.yaml create mode 
100644 k8s/k8s-workload-registrar/mode-webhook/k8s/namespace.yaml create mode 100644 k8s/k8s-workload-registrar/mode-webhook/k8s/spire-agent.yaml create mode 100644 k8s/k8s-workload-registrar/mode-webhook/k8s/spire-server.yaml create mode 100644 k8s/k8s-workload-registrar/mode-webhook/k8s/validation-webhook.yaml create mode 100644 k8s/k8s-workload-registrar/mode-webhook/k8s/workload.yaml create mode 100755 k8s/k8s-workload-registrar/mode-webhook/scripts/deploy-scenario.sh diff --git a/k8s/k8s-workload-registrar/README.md b/k8s/k8s-workload-registrar/README.md index 24a44c0..4898a51 100644 --- a/k8s/k8s-workload-registrar/README.md +++ b/k8s/k8s-workload-registrar/README.md @@ -1,19 +1,29 @@ # Configure SPIRE to use the Kubernetes Workload Registrar -This tutorial builds on the [Kubernetes Quickstart Tutorial](../quickstart/) and provides an example of how to configure SPIRE to use the Kubernetes Workload Registrar as a container within the SPIRE Server pod. With this tool, automatic workload registration and management is added to SPIRE. The changes required to deploy the registrar and the necessary files are shown as a delta to the quickstart tutorial, so it is highly encouraged to execute, or at least read through, the Kubernetes Quickstart Tutorial first. + This tutorial builds on the [Kubernetes Quickstart Tutorial](../quickstart/) to provide an example of how to configure the SPIRE Kubernetes Workload Registrar as a container within the SPIRE Server pod. The registrar enables automatic workload registration and management in SPIRE Kubernetes implementations. The changes required to deploy the registrar and the necessary files are shown as a delta to the quickstart tutorial, so it is highly encouraged to execute, or at least read through, the Kubernetes Quickstart Tutorial first. + +This tutorial demonstrates how to use the registrar's three different modes: + + * Webhook - For historical reasons, the webhook mode is the default but reconcile and CRD modes are now preferred because webhook can create StatefulSets and pods with no entries and cause other cleanup and scalability issues. + * Reconcile - The reconcile mode uses reconciling controllers rather than webhooks. It may may be slightly faster to create new entries than CRD mode and requires less configuration. + * CRD - The CRD mode provides a namespaced SpiffeID custom resource and is best for cases where you plan to manage SpiffeID custom resources directly. + +See the [Differences between modes](https://github.com/spiffe/spire/tree/master/support/k8s/k8s-workload-registrar#differences-between-modes) section of the GitHub README for more information. In this document you will learn how to: -* Deploy the K8s Workload Registrar as a container within the SPIRE Server Pod -* Configure the 3 available modes and their differences -* Use the 3 available workload registration modes -* Test successful registration entries creation + * Deploy the K8s Workload Registrar as a container within the SPIRE Server Pod + * Configure the three workload registration modes + * Use the three workload registration modes + * Test successful registration entries creation + +For documentation about SPIRE Kubernetes Workload Registrar configuration options, see the [GitHub README](https://github.com/spiffe/spire/tree/master/support/k8s/k8s-workload-registrar). 
-# Prerequisites -Before proceeding, review the following list: -* You'll need access to the Kubernetes environment configured when going through the [Kubernetes Quickstart Tutorial](../quickstart/). -* Required configuration files for this tutorial can be found in the `k8s/k8s-workload-registrar` directory in https://github.com/spiffe/spire-tutorials. If you didn't already clone the repo for the _Kubernetes Quickstart Tutorial_, please do so now. -* The steps in this document should work with Kubernetes version 1.20.2. + # Prerequisites + Before proceeding, review the following list: + * You'll need access to the Kubernetes environment configured when going through the [Kubernetes Quickstart Tutorial](../quickstart/). + * Required configuration files for this tutorial can be found in the `k8s/k8s-workload-registrar` directory in [https://github.com/spiffe/spire-tutorials](https://github.com/spiffe/spire-tutorials). If you didn't already clone the repo for the _Kubernetes Quickstart Tutorial_, please do so now. + * The steps in this document should work with Kubernetes version 1.20.2. -We will deploy an scenario that consists of a statefulset containing a SPIRE Server and the Kubernetes Workload Registrar, a SPIRE Agent, and a workload, and configure the different modes to illustrate the automatic registration entries creation. +We will deploy an scenario that consists of a StatefulSet containing a SPIRE Server and the Kubernetes Workload Registrar, a SPIRE Agent, and a workload, and configure the different modes to illustrate the automatic registration entries creation. # Common configuration @@ -119,13 +129,13 @@ metadata: namespace: spire data: k8s-workload-registrar.conf: | + trust_domain = "example.org" + server_socket_path = "/tmp/spire-server/private/api.sock" + cluster = "example-cluster" + mode = "webhook" cert_path = "/run/spire/k8s-workload-registrar/certs/server-cert.pem" key_path = "/run/spire/k8s-workload-registrar/secret/server-key.pem" cacert_path = "/run/spire/k8s-workload-registrar/certs/cacert.pem" - trust_domain = "example.org" - cluster = "k8stest" - server_socket_path = "/tmp/spire-server/private/api.sock" - insecure_skip_client_verification = false ``` As we can see, the `key_path` points to where the secret containing the server key is mounted, which was shown earlier. The `cert_path` and `cacert_path` points to the directory where the `ConfigMap` with the PEM encoded certificates for the server and for the CA are mounted. When the webhook is triggered, the registrar acts as the server and validates the identity of the client, which is the Kubernetes API server in this case. We can disable this authentication by setting the ```insecure_skip_client_verification``` option to `true` (though it is not recommended). 
@@ -155,25 +165,49 @@ plugins: kubeConfigFile: /etc/kubernetes/pki/admctrl/kubeconfig.yaml ``` -We have looked at the key points of the webhook mode's configuration, so let's apply the necessary files to set our scenario with a SPIRE Server with the registrar container in it, an Agent, and a workload, by issuing the following command: +We have looked at the key points of the webhook mode's configuration, so let's apply the necessary files to set our scenario with a SPIRE Server with the registrar container in it, an Agent, and a workload, by issuing the following command in the `mode-webhook` directory: ```console -$ insert command to deploy the scenario for the webhook mode +$ bash scripts/deploy-scenario.sh ``` This is all we need to have the registration entries created on the server. We will start a shell into the SPIRE Server container and run the entry show directive by executing the command below: ```console -$ insert command to see registration entries +$ kubectl exec -it deployment/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" ``` You should see the following 3 registration entries, corresponding to the node, the agent, and the workload. -***insert reg entries*** - -Let's see how are they built: - -The cluster name is used as Parent ID, and there is no reference to the node that the pod belongs to, this is, all the registration entries are mapped to a single node entry inside the cluster. This represents a drawback for this mode, as all the nodes in the cluster have permission to get identities for all the workloads that belong to the Kubernetes cluster, which increases the blast ratio in case of a node being compromised, among other disadvantages. +```console +Found 3 entries +Entry ID : ... +SPIFFE ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node +Parent ID : spiffe://example.org/spire/server +Revision : 0 +TTL : default +Selector : k8s_psat:cluster:example-cluster + +Entry ID : ... +SPIFFE ID : spiffe://example.org/ns/spire/sa/spire-agent +Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node +Revision : 0 +TTL : default +Selector : k8s:ns:spire +Selector : k8s:pod-name:spire-agent-wtx7b + +Entry ID : ... +SPIFFE ID : spiffe://example.org/ns/spire/sa/default +Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node +Revision : 0 +TTL : default +Selector : k8s:ns:spire +Selector : k8s:pod-name:example-workload-6877cd47d5-2fmpq +``` + +We omitted the entry ids, as it may change with every run. Let's see how the other fields are built: + +The cluster name *example-cluster* is used as Parent ID in all the entries, and there is no reference to the node that the pods belong to, this is, all the registration entries are mapped to a single node entry inside the cluster. This represents a drawback for this mode, as all the nodes in the cluster have permission to get identities for all the workloads that belong to the Kubernetes cluster, which increases the blast radius in case of a node being compromised, among other disadvantages. Taking a look on the assigned SPIFFE IDs for the agent and the workload, we can see that they have the following form: *spiffe://\/ns/\/sa/\*. @@ -181,42 +215,61 @@ From this, we can conclude that we are using the registrar configured with the S Another thing that is worth looking, is the registrar log, in which we will found out if the entries were created by this container. 
Run the following command to get the logs of the registrar, and to look for the *Created pod entry* keyword. -***insert command to get the logs and grep over the desired keyword*** - -The result should be similar to the one shown below: - -***insert output of the command above*** +```console +kubectl logs deployment/spire-server -n spire -c k8s-workload-registrar | grep "Created pod entry" +``` -We can check that the 3 entries that were present on the SPIRE Server were created by the registrar, and correspond to the node, agent, and workload, in that specific order. +From the output of this command, we can conclude that the 3 entries that were present on the SPIRE Server were created by the registrar, and correspond to the node, agent, and workload, in that specific order. ## Pod deletion Let's see how the registrar handles a pod deletion, and which impact does it have on the registration entries. Run the following command to delete the workload deployment: ```console -$ insert command to delete workload pod +$ kubectl delete deployment/example-workload -n spire ``` + Again, check for the registration entries with the command below: ```console -$ insert command to see registration entries +$ kubectl exec -it deployment/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" ``` -The output of the command will not include the registration entry that correspond to the workload, because the pod was deleted, and should be similar to this: +The output of the command will not include the registration entry that correspond to the workload, because the pod was deleted, and should be similar to: -***insert reg entries*** +```console +Found 2 entries +Entry ID : ... +SPIFFE ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node +Parent ID : spiffe://example.org/spire/server +Revision : 0 +TTL : default +Selector : k8s_psat:cluster:example-cluster + +Entry ID : ... +SPIFFE ID : spiffe://example.org/ns/spire/sa/spire-agent +Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node +Revision : 0 +TTL : default +Selector : k8s:ns:spire +Selector : k8s:pod-name:spire-agent-wtx7b +``` As the pod was deleted, we will check the registrar logs, looking for the "Deleted pod entry" keyword, with the command shown below: ```console -$ insert command to get the logs and grep over the desired keyword +$ kubectl logs deployment/spire-server -n spire -c k8s-workload-registrar | grep "Deleting pod entries" ``` -The output should be similar to: +From which we can conclude that the registrar successfuly deleted the corresponding entry of the *example-workload* pod. + +## Teardown -***insert output of the command above*** +To delete the resources used for this mode, we will delete the cluster by executing: -From which we can conclude that the registrar successfuly deleted the corresponding entry of the recently deleted pod. +```console +kind delete cluster --name example-cluster +``` # Reconcile mode @@ -232,31 +285,56 @@ metadata: namespace: spire data: k8s-workload-registrar.conf: | - mode = "reconcile" trust_domain = "example.org" - cluster = "k8stest" server_socket_path = "/tmp/spire-server/private/api.sock" - metrics_addr = "0" + cluster = "example-cluster" + mode = "reconcile" pod_label = "spire-workload" + metrics_addr = "0" ``` We are explicitly indicating that *reconcile* mode is used. 
For the sake of the tutorial, we will be using Label Based workload registration for this mode (as we can see from the `pod_label` configurable), though every workload registration mode can be used with every registrar mode. This is all the configuration that is needed to have the containers working properly. -We will deploy the same scenario as the previous mode, with the difference on the agent and workload pods: they will be labeled with the *spire-workload* label, that corresponds to the value indicated in the `pod_label` option of the `ConfigMap` shown above. Run the following command to set the scenario: +We will deploy the same scenario as the previous mode, with the difference on the agent and workload pods: they will be labeled with the *spire-workload* label, that corresponds to the value indicated in the `pod_label` option of the `ConfigMap` shown above. Ensure that your working directory is `mode-reconcile` and run the following command to set the scenario: ```console -$ insert command to deploy the scenario for the reconcile mode +$ bash scripts/deploy-scenario.sh ``` With the Reconcile scenario set, we will check the registration entries and some special considerations for this mode. Let's issue the command below to start a shell into the SPIRE Server container, and to show the existing registration entries. ```console -$ insert command to see registration entries +$ kubectl exec -it deployment/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" ``` Your output should similar to the following, and shows the entries for the node, the agent and the workload: -***insert reg entries*** +```console +Entry ID : 84bb478d-2ec7-448c-86f6-51c8970c60ab +SPIFFE ID : spiffe://example.org/spire-k8s-registrar/example-cluster/node/example-cluster-control-plane +Parent ID : spiffe://example.org/spire/server +Revision : 0 +TTL : default +Selector : k8s_psat:agent_node_name:example-cluster-control-plane +Selector : k8s_psat:cluster:example-cluster + +Found 3 entries +Entry ID : 5ea1a895-d144-49fe-9d58-bbc7ad903bee +SPIFFE ID : spiffe://example.org/agent +Parent ID : spiffe://example.org/spire-k8s-registrar/example-cluster/node/example-cluster-control-plane +Revision : 0 +TTL : default +Selector : k8s:ns:spire +Selector : k8s:pod-name:spire-agent-c5c5f + +Entry ID : f9606cee-0773-4228-8440-ea2bac9ca3ed +SPIFFE ID : spiffe://example.org/example-workload +Parent ID : spiffe://example.org/spire-k8s-registrar/example-cluster/node/example-cluster-control-plane +Revision : 0 +TTL : default +Selector : k8s:ns:spire +Selector : k8s:pod-name:example-workload-b98cc787d-kzxz6 +``` If we compare this entries to the Webhook mode ones, the difference is that the Parent ID of the SVID contains a reference to the node name where the pod is scheduled on. We mentioned that this is not happening using the Webhook node, and this was one of its principal drawbacks. Also, for the node registration entry (the one that has the SPIRE Server SPIFFE ID as the Parent ID), node name is used in the selectors, along with the cluster name. For the remaining two entries, pod name and namespace are used in the selectors instead. @@ -265,64 +343,77 @@ As we are using Label workload registration mode, the SPIFFE ID's for the agent Let's check if the registrar indeed created the registration entries, by checking its logs, and looking for the *Created new spire entry* keyword. 
Run the command that is shown below: ```console -$ insert command to see the registrar logs +$ kubectl logs deployment/spire-server -n spire -c k8s-workload-registrar | grep "Created new spire entry" ``` -The output will be similar to this: - -***insert output of show logs command*** - -We mentioned before that there were two reconciling controllers, and we are seeing now that the node controller created the entry for the single node in the cluster, and that the pod controller created the entries for the two labeled pods: agent and workload. +We mentioned before that there were two reconciling controllers, and from the output of the command above, we can see that the node controller created the entry for the single node in the cluster, and that the pod controller created the entries for the two labeled pods: agent and workload. ## Pod deletion The Kubernetes Workload Registrar automatically handles the creation and deletion of registration entries. We already see how the entries are created, and now we will test its deletion. Let's delete the workload deployment: ```console -$ insert command to delete workload deployment +$ kubectl delete deployment/example-workload -n spire ``` We will check if its corresponding entry is deleted too. Run the following command to see the registration entries on the SPIRE Server: ```console -$ insert command to see registration entries +$ kubectl exec -it deployment/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" ``` The output will only show two registration entries, because the workload entry was deleted by the registrar: -***insert reg entries*** +```console +Found 2 entries +Entry ID : 5ea1a895-d144-49fe-9d58-bbc7ad903bee +SPIFFE ID : spiffe://example.org/agent +Parent ID : spiffe://example.org/spire-k8s-registrar/example-cluster/node/example-cluster-control-plane +Revision : 0 +TTL : default +Selector : k8s:ns:spire +Selector : k8s:pod-name:spire-agent-c5c5f + +Entry ID : 84bb478d-2ec7-448c-86f6-51c8970c60ab +SPIFFE ID : spiffe://example.org/spire-k8s-registrar/example-cluster/node/example-cluster-control-plane +Parent ID : spiffe://example.org/spire/server +Revision : 0 +TTL : default +Selector : k8s_psat:agent_node_name:example-cluster-control-plane +Selector : k8s_psat:cluster:example-cluster +``` If we look for the *Deleted entry* keyword on the registrar logs, we will find out that the registrar deleted the entry. Issue the following command: ```console -$ insert command to see registrar logs +$ kubectl logs deployment/spire-server -n spire -c k8s-workload-registrar | grep "Deleted entry" ``` -The output should be similar to: - -***insert output of above command*** - -The registrar successfuly deleted the entry. +The pod controller successfuly deleted the entry. ## Non-labeled pods -As we are using Label Based workload registration, only pods that have the label *spire-workload* will have its registration entry automatically created. Let's deploy a pod that has no label with the command below: +As we are using Label Based workload registration, only pods that have the *spire-workload* label will have its registration entry automatically created. 
Let's deploy a pod that has no label with the command below, by executing the comand below, from the `mode-reconcile` directory: ```console -$ insert command to deploy a non-labeled workload +$ kubectl apply -f k8s/not-labeled-workload.yaml ``` Let's see the existing registration entries with the command: ```console -$ insert command to see registration entries +$ kubectl exec -it deployment/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" ``` -It's output should be similar to: +The output should remain constant, compared to the one that we obtained in the *Pod deletion* section. This is, the only two registration entries on the SPIRE Server corresponds to the labeled deployed resources. This is the expected behaviour, as only labeled pods will be considered by the workload registrar while using the Label Workload registration mode. + +## Teardown -***insert reg entries*** +To delete the resources used for this mode, we will delete the cluster by executing: -We see that the entries are the same as before, and that no entry has been created for the new workload. This is the expected behaviour, as only labeled pods will be considered by the workload registrar while using the Label Workload registration mode. +```console +kind delete cluster --name example-cluster +``` # CRD mode @@ -356,7 +447,7 @@ data: k8s-workload-registrar.conf: | trust_domain = "example.org" server_socket_path = "/tmp/spire-server/private/api.sock" - cluster = "k8stest" + cluster = "example-cluster" mode = "crd" pod_annotation = "spiffe.io/spiffe-id" ``` @@ -364,18 +455,47 @@ data: Let's deploy the necessary files, including the base scenario plus the SPIFFE ID CRD definition, and examine the automatically created registration entries. 
```console -$ insert command to deploy the crd mode scenario +$ bash scripts/deploy-scenario.sh ``` Start a shell into the SPIRE Server and run the entry show command by executing: ```console -$ insert command to see registration entries +$ kubectl exec -it deployment/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" ``` The output should show the following registration entries: -***insert reg entries*** +```console +Found 3 entries +Entry ID : cc0e9405-939d-4b45-96d7-e160c89cf6f5 +SPIFFE ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane +Parent ID : spiffe://example.org/spire/server +Revision : 1 +TTL : default +Selector : k8s_psat:agent_node_uid:08990bfd-3551-4761-8a1b-2e652984ffdd +Selector : k8s_psat:cluster:example-cluster + +Entry ID : b9b3b92b-06a2-4619-8fca-31ed1ea0138d +SPIFFE ID : spiffe://example.org/testing/agent +Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane +Revision : 1 +TTL : default +Selector : k8s:node-name:example-cluster-control-plane +Selector : k8s:ns:spire +Selector : k8s:pod-uid:538886bb-48e1-4795-b386-10e97f50e34f +DNS name : spire-agent-jzc8w + +Entry ID : 561cc364-35fd-426e-9de6-e5db0605d1a1 +SPIFFE ID : spiffe://example.org/testing/example-workload +Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane +Revision : 1 +TTL : default +Selector : k8s:node-name:example-cluster-control-plane +Selector : k8s:ns:spire +Selector : k8s:pod-uid:78ed3fc5-4cff-476a-90f5-37d3abd47823 +DNS name : example-workload-6877cd47d5-l4hv5 +``` 3 entries were created corresponding to the node, agent, and workload. For the node entry (the one that has the SPIRE Server SPIFFE ID as Parent ID), we see a difference in the selectors, comparing it with the selectors in the node entry created using Reconcile mode: we find out that instead of placing the node name, CRD mode stores the UID of the node where the agent is running on. As the node name is used in the SPIFFE ID assigned to the node, we can take this as a mapping from node UID to node name. @@ -388,33 +508,52 @@ If we now focus our attention on the SPIFFE IDs assigned to the workloads, we se As in the previous modes, if we delete the workload deployment, we will see that its corresponding registration entry will be deleted too. 
Let's run the command to delete the workload pod: ```console -$ insert command to delete workload deployment +$ kubectl delete deployment/example-workload -n spire ``` And now, check the registration entries in the SPIRE Server by executing: ```console -insert command to see registration entries +$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" ``` The output should look like: -***insert registration entries*** +```console +Found 2 entries +Entry ID : cc0e9405-939d-4b45-96d7-e160c89cf6f5 +SPIFFE ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane +Parent ID : spiffe://example.org/spire/server +Revision : 1 +TTL : default +Selector : k8s_psat:agent_node_uid:08990bfd-3551-4761-8a1b-2e652984ffdd +Selector : k8s_psat:cluster:example-cluster + +Entry ID : b9b3b92b-06a2-4619-8fca-31ed1ea0138d +SPIFFE ID : spiffe://example.org/testing/agent +Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane +Revision : 1 +TTL : default +Selector : k8s:node-name:example-cluster-control-plane +Selector : k8s:ns:spire +Selector : k8s:pod-uid:538886bb-48e1-4795-b386-10e97f50e34f +DNS name : spire-agent-jzc8w +``` The only entries that should exist now are the ones that match the node and the SPIRE agent, because the workload one was deleted by the registrar. ## Non-annotated pods -Let's check if a pod that has no annotations its considered by the registrar. Deploy a new workload with this condition with the following command: +Let's check if a pod that has no annotations its considered by the registrar. Ensure that your working directory is `mode-crd` and deploy a new workload with this condition with the following command: ```console -$ insert command to deploy a workload with no annotation +$ kubectl apply -f k8s/not-annotated-workload.yaml ``` As in the previous section, let's see the registration entries that are present in the SPIRE Server: ```console -insert command to see registration entries +$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" ``` The result of the command should be equal to the one shown in *Pod deletion* section, because no new entry has been created, as expected. @@ -425,29 +564,65 @@ One of the benefits of using CRD Mode is that we can manipulate the SPIFFE IDs a If we check for SPIFFE IDs resources (using *kubectl get spiffeids -n spire*), we'll obtain something like the following: -***insert the spiffeids resources*** +```console +NAME AGE +example-cluster-control-plane 11m +spire-agent-jzc8w 11m +``` -From this we can see that there are 2 already created custom resources, corresponding to the 3 entries that we saw above, minus the one for the workload, whose pod was deleted in the *Pod deletion* section. +From this, we can see that there are two already created custom resources, corresponding to the three entries that we obtained when the scenario was deployed, minus the one for the annotated workload, whose pod was deleted in the *Pod deletion* section. 
-Let's create a new SPIFFE ID CRD by using: +From the `mode-crd` folder, let's create a new SPIFFE ID CRD by using: ```console -# insert command to create a spiffe id crd +# kubectl apply -f k8s/test_spiffeid.yaml ``` We will check if it was created, executing the *kubectl get spiffeids -n spire* command, whose output will show 3 custom resources: -***insert output of spiffeids resources after applying the spiffeid*** +```console +NAME AGE +example-cluster-control-plane 19m +my-test-spiffeid 19s +spire-agent-jzc8w 18m +``` The resource was succesfully created, but had it any impact on the SPIRE Server? Let's execute the command below to see the registration entries: ```console -$ insert command to see registration entries +$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" ``` You'll get an output similar to this: -***insert reg entries*** +```console +Found 3 entries +Entry ID : cc0e9405-939d-4b45-96d7-e160c89cf6f5 +SPIFFE ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane +Parent ID : spiffe://example.org/spire/server +Revision : 1 +TTL : default +Selector : k8s_psat:agent_node_uid:08990bfd-3551-4761-8a1b-2e652984ffdd +Selector : k8s_psat:cluster:example-cluster + +Entry ID : 0524528a-ca5b-452b-b5f9-7e9cb5652446 +SPIFFE ID : spiffe://example.org/test +Parent ID : spiffe://example.org/spire/server +Revision : 1 +TTL : default +Selector : k8s:ns:spire +Selector : k8s:pod-name:my-test-pod + +Entry ID : b9b3b92b-06a2-4619-8fca-31ed1ea0138d +SPIFFE ID : spiffe://example.org/testing/agent +Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane +Revision : 1 +TTL : default +Selector : k8s:node-name:example-cluster-control-plane +Selector : k8s:ns:spire +Selector : k8s:pod-uid:538886bb-48e1-4795-b386-10e97f50e34f +DNS name : spire-agent-jzc8w +``` As we can see, a SPIFFE ID CRD creation triggers a registration entry creation on the SPIRE Server too. @@ -458,17 +633,22 @@ The lifecycle of a SPIFFE ID CRD can be managed by Kubernetes, and has a direct Let's delete the previously created SPIFFE ID CRD, and later check for the registration entries on the server. Run the following command to delete the CRD: ```console -$ insert command to delete a spiffe id crd +$ kubectl delete spiffeid/my-test-spiffeid -n spire ``` Now, we will check the registration entries: ```console -$ insert command to see reg entries on spire server +$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" ``` -The output from this command should look like: +The output from this command should include only the entries for the node and the agent, because the recently created SPIFFE ID CRD was deleted. + +## Teardown -***insert reg entries*** -As we can see, the corresponding registration entry was deleted on the SPIRE Server too. 
+To delete the resources used for this mode, we will delete the cluster by executing:
+
+```console
+kind delete cluster --name example-cluster
+```
diff --git a/k8s/k8s-workload-registrar/create-cluster.sh b/k8s/k8s-workload-registrar/create-cluster.sh
new file mode 100755
index 0000000..7f9ad10
--- /dev/null
+++ b/k8s/k8s-workload-registrar/create-cluster.sh
@@ -0,0 +1,3 @@
+sed -i.bak "s#K8SDIR#${PWD}/mode-webhook/k8s#g" kind-config.yaml
+rm kind-config.yaml.bak
+kind create cluster --name example-cluster --config kind-config.yaml
diff --git a/k8s/k8s-workload-registrar/kind-config.yaml b/k8s/k8s-workload-registrar/kind-config.yaml
new file mode 100644
index 0000000..d91d6ae
--- /dev/null
+++ b/k8s/k8s-workload-registrar/kind-config.yaml
@@ -0,0 +1,19 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+kubeadmConfigPatches:
+- |
+  apiVersion: kubeadm.k8s.io/v1beta2
+  kind: ClusterConfiguration
+  metadata:
+    name: config
+  apiServer:
+    extraArgs:
+      "service-account-signing-key-file": "/etc/kubernetes/pki/sa.key"
+      "service-account-issuer": "api"
+      "service-account-api-audiences": "api,spire-server"
+      "admission-control-config-file": "/etc/kubernetes/pki/admctrl/admission-control.yaml"
+nodes:
+- role: control-plane
+  extraMounts:
+  - containerPath: /etc/kubernetes/pki/admctrl
+    hostPath: K8SDIR/admctrl
diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-cluster-role.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-cluster-role.yaml
new file mode 100644
index 0000000..6efd997
--- /dev/null
+++ b/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-cluster-role.yaml
@@ -0,0 +1,31 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: k8s-workload-registrar-role
+rules:
+- apiGroups: [""]
+  resources: ["endpoints", "nodes", "pods"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: ["spiffeid.spiffe.io"]
+  resources: ["spiffeids"]
+  verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+- apiGroups: ["spiffeid.spiffe.io"]
+  resources: ["spiffeids/status"]
+  verbs: ["get", "patch", "update"]
+- apiGroups: ["admissionregistration.k8s.io"]
+  resources: ["validatingwebhookconfigurations"]
+  verbs: ["get", "list", "update", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: k8s-workload-registrar-role-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: k8s-workload-registrar-role
+subjects:
+- kind: ServiceAccount
+  name: spire-server
+  namespace: spire
diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-configmap.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-configmap.yaml
new file mode 100644
index 0000000..1a17c79
--- /dev/null
+++ b/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-configmap.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: k8s-workload-registrar
+  namespace: spire
+data:
+  k8s-workload-registrar.conf: |
+    trust_domain = "example.org"
+    server_socket_path = "/tmp/spire-server/private/api.sock"
+    cluster = "example-cluster"
+    mode = "crd"
+    pod_annotation = "spiffe.io/spiffe-id"
diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-statefulset.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-statefulset.yaml
new file mode 100644
index 0000000..5220024
--- /dev/null
+++ b/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-statefulset.yaml
@@ -0,0 +1,94 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: spire-server
+  namespace: spire
+  labels:
+    app: spire-server
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: spire-server
+  serviceName: spire-server
+  template:
+    metadata:
+      namespace: spire
+      labels:
+        app: spire-server
+    spec:
+      serviceAccountName: spire-server
+      shareProcessNamespace: true
+      containers:
+      - name: spire-server
+        image: gcr.io/spiffe-io/spire-server:0.12.0
+        args:
+        - -config
+        - /run/spire/config/server.conf
+        livenessProbe:
+          exec:
+            command: ["/opt/spire/bin/spire-server", "healthcheck", "-registrationUDSPath", "/tmp/spire-server/private/api.sock"]
+          failureThreshold: 2
+          initialDelaySeconds: 15
+          periodSeconds: 60
+          timeoutSeconds: 3
+        readinessProbe:
+          exec:
+            command: ["/opt/spire/bin/spire-server", "healthcheck", "--shallow", "-registrationUDSPath", "/tmp/spire-server/private/api.sock"]
+          initialDelaySeconds: 5
+          periodSeconds: 5
+        ports:
+        - containerPort: 8081
+        volumeMounts:
+        - name: spire-config
+          mountPath: /run/spire/config
+          readOnly: true
+        - name: spire-secrets
+          mountPath: /run/spire/secrets
+          readOnly: true
+        - name: spire-data
+          mountPath: /run/spire/data
+          readOnly: false
+        - name: spire-registration-socket
+          mountPath: /tmp/spire-server/private
+          readOnly: false
+      - name: k8s-workload-registrar
+        image: gcr.io/spiffe-io/k8s-workload-registrar:0.12.0
+        args:
+        - -config
+        - /run/spire/config/k8s-workload-registrar.conf
+        ports:
+        - containerPort: 9443
+          name: webhook
+          protocol: TCP
+        volumeMounts:
+        - mountPath: /run/spire/config
+          name: k8s-workload-registrar-config
+          readOnly: true
+        - name: spire-registration-socket
+          mountPath: /tmp/spire-server/private
+          readOnly: true
+      volumes:
+      - name: spire-config
+        configMap:
+          name: spire-server
+      - name: spire-secrets
+        secret:
+          secretName: spire-server
+      - name: k8s-workload-registrar-config
+        configMap:
+          name: k8s-workload-registrar
+      - name: spire-registration-socket
+        hostPath:
+          path: /run/spire/server-sockets
+          type: DirectoryOrCreate
+  volumeClaimTemplates:
+  - metadata:
+      name: spire-data
+      namespace: spire
+    spec:
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          storage: 1Gi
diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/namespace.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/namespace.yaml
new file mode 100644
index 0000000..c6ba349
--- /dev/null
+++ b/k8s/k8s-workload-registrar/mode-crd/k8s/namespace.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: spire
diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/not-annotated-workload.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/not-annotated-workload.yaml
new file mode 100644
index 0000000..9aad1c7
--- /dev/null
+++ b/k8s/k8s-workload-registrar/mode-crd/k8s/not-annotated-workload.yaml
@@ -0,0 +1,35 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: example-workload
+  namespace: spire
+  labels:
+    app: example-workload
+spec:
+  selector:
+    matchLabels:
+      app: example-workload
+  template:
+    metadata:
+      namespace: spire
+      labels:
+        app: example-workload
+    spec:
+      hostPID: true
+      hostNetwork: true
+      dnsPolicy: ClusterFirstWithHostNet
+      containers:
+      - name: example-workload
+        image: gcr.io/spiffe-io/spire-agent:0.12.0
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "/opt/spire/bin/spire-agent", "api", "watch"]
+        args: ["-socketPath", "/tmp/spire-agent/public/api.sock"]
+        volumeMounts:
+        - name: spire-agent-socket
+          mountPath: /tmp/spire-agent/public
+          readOnly: true
+      volumes:
+      - name: spire-agent-socket
+        hostPath:
+          path: /run/spire/agent-sockets
+          type: Directory
diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/spiffeid.spiffe.io_spiffeids.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/spiffeid.spiffe.io_spiffeids.yaml
new file mode 100644
index 0000000..302c2b7
--- /dev/null
+++ b/k8s/k8s-workload-registrar/mode-crd/k8s/spiffeid.spiffe.io_spiffeids.yaml
@@ -0,0 +1,106 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.2.4
+  creationTimestamp: null
+  name: spiffeids.spiffeid.spiffe.io
+spec:
+  group: spiffeid.spiffe.io
+  names:
+    kind: SpiffeID
+    listKind: SpiffeIDList
+    plural: spiffeids
+    singular: spiffeid
+  scope: Namespaced
+  subresources:
+    status: {}
+  validation:
+    openAPIV3Schema:
+      description: SpiffeID is the Schema for the spiffeid API
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: SpiffeIDSpec defines the desired state of SpiffeID + properties: + dnsNames: + items: + type: string + type: array + parentId: + type: string + selector: + properties: + arbitrary: + description: Arbitrary selectors + items: + type: string + type: array + containerImage: + description: Container image to match for this spiffe ID + type: string + containerName: + description: Container name to match for this spiffe ID + type: string + namespace: + description: Namespace to match for this spiffe ID + type: string + nodeName: + description: Node name to match for this spiffe ID + type: string + podLabel: + additionalProperties: + type: string + description: Pod label name/value to match for this spiffe ID + type: object + podName: + description: Pod name to match for this spiffe ID + type: string + podUid: + description: Pod UID to match for this spiffe ID + type: string + serviceAccount: + description: ServiceAccount to match for this spiffe ID + type: string + type: object + spiffeId: + type: string + required: + - parentId + - selector + - spiffeId + type: object + status: + description: SpiffeIDStatus defines the observed state of SpiffeID + properties: + entryId: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + type: object + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/spire-agent.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/spire-agent.yaml new file mode 100644 index 0000000..532e640 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-crd/k8s/spire-agent.yaml @@ -0,0 +1,166 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-agent + namespace: spire + +--- + +# Required cluster role to allow spire-agent to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role +rules: +- apiGroups: [""] + resources: ["pods","nodes", "nodes/proxy"] + verbs: ["get"] + +--- + +# Binds above cluster role to spire-agent service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role-binding +subjects: +- kind: ServiceAccount + name: spire-agent + namespace: spire +roleRef: + kind: ClusterRole + name: spire-agent-cluster-role + apiGroup: rbac.authorization.k8s.io + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-agent + namespace: spire +data: + agent.conf: | + agent { + data_dir = "/run/spire" + log_level = "DEBUG" + server_address = "spire-server" + server_port = "8081" + socket_path = "/tmp/spire-agent/public/api.sock" + trust_bundle_path = "/run/spire/config/bootstrap.crt" + trust_domain = "example.org" + } + + plugins { + NodeAttestor "k8s_psat" { + plugin_data { + cluster = "example-cluster" + } + } + + KeyManager "memory" { + plugin_data { + } + } + + WorkloadAttestor "k8s" { + plugin_data { + # Defaults to the secure kubelet port by default. + # Minikube does not have a cert in the cluster CA bundle that + # can authenticate the kubelet cert, so skip validation. 
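+            # Note: this is appropriate for local clusters like kind or Minikube;
+            # if your cluster's kubelet certificate can be verified against the
+            # cluster CA, this flag can likely be removed (verify in your environment).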
+ skip_kubelet_verification = true + } + } + + WorkloadAttestor "unix" { + plugin_data { + } + } + } + bootstrap.crt: | + -----BEGIN CERTIFICATE----- + MIIBzDCCAVOgAwIBAgIJAJM4DhRH0vmuMAoGCCqGSM49BAMEMB4xCzAJBgNVBAYT + AlVTMQ8wDQYDVQQKDAZTUElGRkUwHhcNMTgwNTEzMTkzMzQ3WhcNMjMwNTEyMTkz + MzQ3WjAeMQswCQYDVQQGEwJVUzEPMA0GA1UECgwGU1BJRkZFMHYwEAYHKoZIzj0C + AQYFK4EEACIDYgAEWjB+nSGSxIYiznb84xu5WGDZj80nL7W1c3zf48Why0ma7Y7m + CBKzfQkrgDguI4j0Z+0/tDH/r8gtOtLLrIpuMwWHoe4vbVBFte1vj6Xt6WeE8lXw + cCvLs/mcmvPqVK9jo10wWzAdBgNVHQ4EFgQUh6XzV6LwNazA+GTEVOdu07o5yOgw + DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwGQYDVR0RBBIwEIYOc3Bp + ZmZlOi8vbG9jYWwwCgYIKoZIzj0EAwQDZwAwZAIwE4Me13qMC9i6Fkx0h26y09QZ + IbuRqA9puLg9AeeAAyo5tBzRl1YL0KNEp02VKSYJAjBdeJvqjJ9wW55OGj1JQwDF + D7kWeEB6oMlwPbI/5hEY3azJi16I0uN1JSYTSWGSqWc= + -----END CERTIFICATE----- + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: spire-agent + namespace: spire + labels: + app: spire-agent +spec: + selector: + matchLabels: + app: spire-agent + template: + metadata: + namespace: spire + labels: + app: spire-agent + annotations: + spiffe.io/spiffe-id: "testing/agent" + spec: + hostPID: true + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: spire-agent + initContainers: + - name: init + # This is a small image with wait-for-it, choose whatever image + # you prefer that waits for a service to be up. This image is built + # from https://github.com/lqhl/wait-for-it + image: gcr.io/spiffe-io/wait-for-it + args: ["-t", "30", "spire-server:8081"] + containers: + - name: spire-agent + image: gcr.io/spiffe-io/spire-agent:0.12.0 + imagePullPolicy: IfNotPresent + args: ["-config", "/run/spire/config/agent.conf"] + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + - name: spire-agent-socket + mountPath: /tmp/spire-agent/public + readOnly: false + - name: spire-token + mountPath: /var/run/secrets/tokens + livenessProbe: + exec: + command: ["/opt/spire/bin/spire-agent", "healthcheck", "-socketPath", "/tmp/spire-agent/public/api.sock"] + failureThreshold: 2 + initialDelaySeconds: 15 + periodSeconds: 60 + timeoutSeconds: 3 + readinessProbe: + exec: + command: ["/opt/spire/bin/spire-agent", "healthcheck", "-socketPath", "/tmp/spire-agent/public/api.sock", "--shallow"] + initialDelaySeconds: 5 + periodSeconds: 5 + volumes: + - name: spire-config + configMap: + name: spire-agent + - name: spire-agent-socket + hostPath: + path: /tmp/spire-agent/public + type: DirectoryOrCreate + - name: spire-token + projected: + sources: + - serviceAccountToken: + path: spire-agent + expirationSeconds: 7200 + audience: spire-server diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/spire-server.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/spire-server.yaml new file mode 100644 index 0000000..66ac849 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-crd/k8s/spire-server.yaml @@ -0,0 +1,205 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-server + namespace: spire + +--- + +# Required cluster role to allow spire-server to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-cluster-role +rules: +- apiGroups: [""] + resources: ["pods", "nodes"] + verbs: ["get"] +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + +--- + +# Binds above cluster role to spire-server service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: 
spire-server-cluster-role-binding +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +roleRef: + kind: ClusterRole + name: spire-server-cluster-role + apiGroup: rbac.authorization.k8s.io + +--- + +apiVersion: v1 +kind: Secret +metadata: + name: spire-server + namespace: spire +type: Opaque +data: + bootstrap.key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1JR2tBZ0VCQkRBZzJMYnVsWHpRWDFORisyRGkwUkt6TVdmRUdpb0JoaC9mRnB4N3lPRXFrYS8vVHBhZVUzTzUKUUpSWlhkV0hLdWFnQndZRks0RUVBQ0toWkFOaUFBUmFNSDZkSVpMRWhpTE9kdnpqRzdsWVlObVB6U2N2dGJWegpmTi9qeGFITFNacnRqdVlJRXJOOUNTdUFPQzRqaVBSbjdUKzBNZit2eUMwNjBzdXNpbTR6QlllaDdpOXRVRVcxCjdXK1BwZTNwWjRUeVZmQndLOHV6K1p5YTgrcFVyMk09Ci0tLS0tRU5EIEVDIFBSSVZBVEUgS0VZLS0tLS0K + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-server + namespace: spire +data: + server.conf: | + server { + bind_address = "0.0.0.0" + bind_port = "8081" + trust_domain = "example.org" + data_dir = "/run/spire/data" + log_level = "DEBUG" + default_svid_ttl = "1h" + registration_uds_path = "/tmp/spire-server/private/api.sock" + ca_subject = { + country = ["US"], + organization = ["SPIFFE"], + common_name = "", + } + } + + plugins { + DataStore "sql" { + plugin_data { + database_type = "sqlite3" + connection_string = "/run/spire/data/datastore.sqlite3" + } + } + + NodeAttestor "k8s_psat" { + plugin_data { + clusters = { + "example-cluster" = { + service_account_whitelist = ["spire:spire-agent"] + } + } + } + } + + NodeResolver "noop" { + plugin_data {} + } + + KeyManager "disk" { + plugin_data { + keys_path = "/run/spire/data/keys.json" + } + } + + UpstreamAuthority "disk" { + plugin_data { + key_file_path = "/run/spire/secrets/bootstrap.key" + cert_file_path = "/run/spire/config/bootstrap.crt" + } + } + } + bootstrap.crt: | + -----BEGIN CERTIFICATE----- + MIIBzDCCAVOgAwIBAgIJAJM4DhRH0vmuMAoGCCqGSM49BAMEMB4xCzAJBgNVBAYT + AlVTMQ8wDQYDVQQKDAZTUElGRkUwHhcNMTgwNTEzMTkzMzQ3WhcNMjMwNTEyMTkz + MzQ3WjAeMQswCQYDVQQGEwJVUzEPMA0GA1UECgwGU1BJRkZFMHYwEAYHKoZIzj0C + AQYFK4EEACIDYgAEWjB+nSGSxIYiznb84xu5WGDZj80nL7W1c3zf48Why0ma7Y7m + CBKzfQkrgDguI4j0Z+0/tDH/r8gtOtLLrIpuMwWHoe4vbVBFte1vj6Xt6WeE8lXw + cCvLs/mcmvPqVK9jo10wWzAdBgNVHQ4EFgQUh6XzV6LwNazA+GTEVOdu07o5yOgw + DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwGQYDVR0RBBIwEIYOc3Bp + ZmZlOi8vbG9jYWwwCgYIKoZIzj0EAwQDZwAwZAIwE4Me13qMC9i6Fkx0h26y09QZ + IbuRqA9puLg9AeeAAyo5tBzRl1YL0KNEp02VKSYJAjBdeJvqjJ9wW55OGj1JQwDF + D7kWeEB6oMlwPbI/5hEY3azJi16I0uN1JSYTSWGSqWc= + -----END CERTIFICATE----- + +--- + +# apiVersion: apps/v1 +# kind: StatefulSet +# metadata: +# name: spire-server +# namespace: spire +# labels: +# app: spire-server +# spec: +# replicas: 1 +# selector: +# matchLabels: +# app: spire-server +# serviceName: spire-server +# template: +# metadata: +# namespace: spire +# labels: +# app: spire-server +# spec: +# serviceAccountName: spire-server +# containers: +# - name: spire-server +# image: gcr.io/spiffe-io/spire-server:0.11.0 +# args: ["-config", "/run/spire/config/server.conf"] +# ports: +# - containerPort: 8081 +# volumeMounts: +# - name: spire-config +# mountPath: /run/spire/config +# readOnly: true +# - name: spire-secrets +# mountPath: /run/spire/secrets +# readOnly: true +# - name: spire-data +# mountPath: /run/spire/data +# readOnly: false +# livenessProbe: +# exec: +# command: ["/opt/spire/bin/spire-server", "healthcheck"] +# failureThreshold: 2 +# initialDelaySeconds: 15 +# periodSeconds: 60 +# timeoutSeconds: 3 +# readinessProbe: +# exec: +# command: ["/opt/spire/bin/spire-server", "healthcheck", "--shallow"] +# 
initialDelaySeconds: 5
+#            periodSeconds: 5
+#          volumes:
+#            - name: spire-config
+#              configMap:
+#                name: spire-server
+#            - name: spire-secrets
+#              secret:
+#                secretName: spire-server
+#      volumeClaimTemplates:
+#        - metadata:
+#            name: spire-data
+#            namespace: spire
+#          spec:
+#            accessModes:
+#              - ReadWriteOnce
+#            resources:
+#              requests:
+#                storage: 1Gi
+
+# ---
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: spire-server
+  namespace: spire
+spec:
+  type: NodePort
+  ports:
+  - name: grpc
+    port: 8081
+    targetPort: 8081
+    protocol: TCP
+  selector:
+    app: spire-server
diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/test_spiffeid.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/test_spiffeid.yaml
new file mode 100644
index 0000000..2a5ba52
--- /dev/null
+++ b/k8s/k8s-workload-registrar/mode-crd/k8s/test_spiffeid.yaml
@@ -0,0 +1,11 @@
+apiVersion: spiffeid.spiffe.io/v1beta1
+kind: SpiffeID
+metadata:
+  name: my-test-spiffeid
+  namespace: spire
+spec:
+  parentId: spiffe://example.org/spire/server
+  selector:
+    namespace: spire
+    podName: my-test-pod
+  spiffeId: spiffe://example.org/test
diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/workload.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/workload.yaml
new file mode 100644
index 0000000..a004396
--- /dev/null
+++ b/k8s/k8s-workload-registrar/mode-crd/k8s/workload.yaml
@@ -0,0 +1,37 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: example-workload
+  namespace: spire
+  labels:
+    app: example-workload
+spec:
+  selector:
+    matchLabels:
+      app: example-workload
+  template:
+    metadata:
+      namespace: spire
+      labels:
+        app: example-workload
+      annotations:
+        spiffe.io/spiffe-id: "testing/example-workload"
+    spec:
+      hostPID: true
+      hostNetwork: true
+      dnsPolicy: ClusterFirstWithHostNet
+      containers:
+      - name: example-workload
+        image: gcr.io/spiffe-io/spire-agent:0.12.0
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "/opt/spire/bin/spire-agent", "api", "watch"]
+        args: ["-socketPath", "/tmp/spire-agent/public/api.sock"]
+        volumeMounts:
+        - name: spire-agent-socket
+          mountPath: /tmp/spire-agent/public
+          readOnly: true
+      volumes:
+      - name: spire-agent-socket
+        hostPath:
+          path: /run/spire/agent-sockets
+          type: Directory
diff --git a/k8s/k8s-workload-registrar/mode-crd/scripts/deploy-scenario.sh b/k8s/k8s-workload-registrar/mode-crd/scripts/deploy-scenario.sh
new file mode 100644
index 0000000..2517f8c
--- /dev/null
+++ b/k8s/k8s-workload-registrar/mode-crd/scripts/deploy-scenario.sh
@@ -0,0 +1,14 @@
+kubectl apply -f k8s/namespace.yaml
+kubectl apply -f k8s/spiffeid.spiffe.io_spiffeids.yaml
+kubectl apply -f k8s/k8s-workload-registrar-cluster-role.yaml
+kubectl apply -f k8s/spire-server.yaml
+kubectl apply -f k8s/k8s-workload-registrar-configmap.yaml
+kubectl apply -f k8s/k8s-workload-registrar-statefulset.yaml
+
+kubectl rollout status statefulset/spire-server -n spire
+
+kubectl apply -f k8s/spire-agent.yaml
+
+kubectl rollout status daemonset/spire-agent -n spire
+
+kubectl apply -f k8s/workload.yaml
diff --git a/k8s/k8s-workload-registrar/mode-reconcile/k8s/namespace.yaml b/k8s/k8s-workload-registrar/mode-reconcile/k8s/namespace.yaml
new file mode 100644
index 0000000..c6ba349
--- /dev/null
+++ b/k8s/k8s-workload-registrar/mode-reconcile/k8s/namespace.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: spire
diff --git a/k8s/k8s-workload-registrar/mode-reconcile/k8s/not-labeled-workload.yaml b/k8s/k8s-workload-registrar/mode-reconcile/k8s/not-labeled-workload.yaml
new file mode 100644
index 0000000..007c15b --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-reconcile/k8s/not-labeled-workload.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-workload + namespace: spire + labels: + app: example-workload +spec: + selector: + matchLabels: + app: example-workload + template: + metadata: + namespace: spire + labels: + app: example-workload + spec: + hostPID: true + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: example-workload + image: gcr.io/spiffe-io/spire-agent:0.12.0 + imagePullPolicy: IfNotPresent + command: ["/usr/bin/dumb-init", "/opt/spire/bin/spire-agent", "api", "watch"] + args: ["-socketPath", "/tmp/spire-agent/public/api.sock"] + volumeMounts: + - name: spire-agent-socket + mountPath: /tmp/spire-agent/public + readOnly: true + volumes: + - name: spire-agent-socket + hostPath: + path: /run/spire/agent-sockets + type: Directory diff --git a/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-agent.yaml b/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-agent.yaml new file mode 100644 index 0000000..395d117 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-agent.yaml @@ -0,0 +1,169 @@ +# ServiceAccount for the SPIRE agent +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-agent + namespace: spire + +--- + +# Required cluster role to allow spire-agent to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role +rules: +- apiGroups: [""] + resources: ["pods","nodes","nodes/proxy"] + verbs: ["get"] + +--- + +# Binds above cluster role to spire-agent service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role-binding +subjects: +- kind: ServiceAccount + name: spire-agent + namespace: spire +roleRef: + kind: ClusterRole + name: spire-agent-cluster-role + apiGroup: rbac.authorization.k8s.io + + +--- + +# ConfigMap for the SPIRE agent featuring: +# 1) PSAT node attestation +# 2) K8S Workload Attestation over the secure kubelet port +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-agent + namespace: spire +data: + agent.conf: | + agent { + data_dir = "/run/spire" + log_level = "DEBUG" + server_address = "spire-server" + server_port = "8081" + trust_bundle_path = "/run/spire/bundle/bundle.crt" + trust_domain = "example.org" + socket_path = "/tmp/spire-agent/public/api.sock" + } + + plugins { + NodeAttestor "k8s_psat" { + plugin_data { + cluster = "example-cluster" + } + } + + KeyManager "memory" { + plugin_data { + } + } + + WorkloadAttestor "k8s" { + plugin_data { + # Defaults to the secure kubelet port by default. + # Minikube does not have a cert in the cluster CA bundle that + # can authenticate the kubelet cert, so skip validation. + skip_kubelet_verification = true + } + } + } + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8080" + live_path = "/live" + ready_path = "/ready" + } + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: spire-agent + namespace: spire + labels: + app: spire-agent +spec: + selector: + matchLabels: + app: spire-agent + updateStrategy: + type: RollingUpdate + template: + metadata: + namespace: spire + labels: + app: spire-agent + spire-workload: agent + spec: + # hostPID is required for K8S Workload Attestation. 
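+      # The k8s workload attestor reads process information from the host PID
+      # namespace to map a calling workload back to the pod it runs in.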
+ hostPID: true + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: spire-agent + initContainers: + - name: init + # This is a small image with wait-for-it, choose whatever image + # you prefer that waits for a service to be up. This image is built + # from https://github.com/lqhl/wait-for-it + image: gcr.io/spiffe-io/wait-for-it + args: ["-t", "30", "spire-server:8081"] + containers: + - name: spire-agent + image: gcr.io/spiffe-io/spire-agent:0.12.0 + imagePullPolicy: IfNotPresent + args: ["-config", "/run/spire/config/agent.conf"] + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + - name: spire-bundle + mountPath: /run/spire/bundle + readOnly: true + - name: spire-agent-socket + mountPath: /tmp/spire-agent/public + readOnly: false + - name: spire-token + mountPath: /var/run/secrets/tokens + livenessProbe: + httpGet: + path: /live + port: 8080 + initialDelaySeconds: 10 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 10 + periodSeconds: 10 + volumes: + - name: spire-config + configMap: + name: spire-agent + - name: spire-bundle + configMap: + name: spire-bundle + - name: spire-agent-socket + hostPath: + path: /run/spire/agent-sockets + type: DirectoryOrCreate + - name: spire-token + projected: + sources: + - serviceAccountToken: + path: spire-agent + expirationSeconds: 7200 + audience: spire-server diff --git a/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-server.yaml b/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-server.yaml new file mode 100644 index 0000000..39adaa3 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-server.yaml @@ -0,0 +1,292 @@ +# ServiceAccount used by the SPIRE server. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-server + namespace: spire + +--- + +# Required cluster role to allow spire-server to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-cluster-role +rules: +- apiGroups: [""] + resources: ["pods", "nodes"] + verbs: ["get", "list", "watch"] + # allow TokenReview requests (to verify service account tokens for PSAT + # attestation) +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["get", "create"] + +--- + +# Binds above cluster role to spire-server service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-cluster-role-binding + namespace: spire +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +roleRef: + kind: ClusterRole + name: spire-server-cluster-role + apiGroup: rbac.authorization.k8s.io + +--- + +# Role for the SPIRE server +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: spire + name: spire-server-role +rules: + # allow "get" access to pods (to resolve selectors for PSAT attestation) +- apiGroups: [""] + resources: ["pods"] + verbs: ["get"] + # allow access to "get" and "patch" the spire-bundle ConfigMap (for SPIRE + # agent bootstrapping, see the spire-bundle ConfigMap below) +- apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["spire-bundle"] + verbs: ["get", "patch"] +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create"] +- apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["spire-k8s-registrar-leader-election"] + verbs: ["update", "get"] +- apiGroups: [""] + resources: ["events"] + verbs: ["create"] + +--- + +# RoleBinding granting the 
spire-server-role to the SPIRE server +# service account. +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-role-binding + namespace: spire +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +roleRef: + kind: Role + name: spire-server-role + apiGroup: rbac.authorization.k8s.io + +--- + +# ConfigMap containing the latest trust bundle for the trust domain. It is +# updated by SPIRE using the k8sbundle notifier plugin. SPIRE agents mount +# this config map and use the certificate to bootstrap trust with the SPIRE +# server during attestation. +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-bundle + namespace: spire + +--- + +# ConfigMap containing the SPIRE server configuration. +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-server + namespace: spire +data: + server.conf: | + server { + bind_address = "0.0.0.0" + bind_port = "8081" + trust_domain = "example.org" + data_dir = "/run/spire/data" + log_level = "DEBUG" + default_svid_ttl = "1h" + ca_ttl = "12h" + registration_uds_path = "/tmp/spire-server/private/api.sock" + ca_subject { + country = ["US"] + organization = ["SPIFFE"] + common_name = "" + } + } + + plugins { + DataStore "sql" { + plugin_data { + database_type = "sqlite3" + connection_string = "/run/spire/data/datastore.sqlite3" + } + } + + NodeAttestor "k8s_psat" { + plugin_data { + clusters = { + "example-cluster" = { + service_account_whitelist = ["spire:spire-agent"] + } + } + } + } + + KeyManager "disk" { + plugin_data { + keys_path = "/run/spire/data/keys.json" + } + } + + Notifier "k8sbundle" { + plugin_data { + # This plugin updates the bundle.crt value in the spire:spire-bundle + # ConfigMap by default, so no additional configuration is necessary. + } + } + } + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8080" + live_path = "/live" + ready_path = "/ready" + } + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8s-workload-registrar + namespace: spire +data: + k8s-workload-registrar.conf: | + trust_domain = "example.org" + server_socket_path = "/tmp/spire-server/private/api.sock" + cluster = "example-cluster" + mode = "reconcile" + pod_label = "spire-workload" + metrics_addr = "0" + +--- + +# This is the Deployment for the SPIRE server. It waits for SPIRE database to +# initialize and uses the SPIRE healthcheck command for liveness/readiness +# probes. 
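+# The Kubernetes Workload Registrar runs as a second container in this pod and
+# reaches the server through the shared spire-server-socket volume.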
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: spire-server + namespace: spire + labels: + app: spire-server +spec: + replicas: 1 + selector: + matchLabels: + app: spire-server + template: + metadata: + namespace: spire + labels: + app: spire-server + spec: + serviceAccountName: spire-server + shareProcessNamespace: true + containers: + - name: spire-server + image: gcr.io/spiffe-io/spire-server:0.12.0 + imagePullPolicy: IfNotPresent + args: ["-config", "/run/spire/config/server.conf"] + ports: + - containerPort: 8081 + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + - name: spire-server-socket + mountPath: /tmp/spire-server/private + readOnly: false + livenessProbe: + httpGet: + path: /live + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + - name: k8s-workload-registrar + image: gcr.io/spiffe-io/k8s-workload-registrar:0.12.0 + imagePullPolicy: IfNotPresent + args: ["-config", "/run/spire/k8s-workload-registrar/conf/k8s-workload-registrar.conf"] + ports: + - containerPort: 8443 + name: registrar-port + volumeMounts: + - name: spire-server-socket + mountPath: /tmp/spire-server/private + readOnly: true + - name: k8s-workload-registrar + mountPath: /run/spire/k8s-workload-registrar/conf + readOnly: true + volumes: + - name: spire-config + configMap: + name: spire-server + - name: spire-server-socket + hostPath: + path: /run/spire/server-sockets + type: DirectoryOrCreate + - name: k8s-workload-registrar + configMap: + name: k8s-workload-registrar + +--- + +# Service definition for SPIRE server defining the gRPC port. +apiVersion: v1 +kind: Service +metadata: + name: spire-server + namespace: spire +spec: + type: NodePort + ports: + - name: grpc + port: 8081 + targetPort: 8081 + protocol: TCP + selector: + app: spire-server + +--- + +# Service definition for the admission webhook +apiVersion: v1 +kind: Service +metadata: + name: k8s-workload-registrar + namespace: spire +spec: + selector: + app: spire-server + ports: + - port: 443 + targetPort: registrar-port diff --git a/k8s/k8s-workload-registrar/mode-reconcile/k8s/workload.yaml b/k8s/k8s-workload-registrar/mode-reconcile/k8s/workload.yaml new file mode 100644 index 0000000..df35b43 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-reconcile/k8s/workload.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-workload + namespace: spire + labels: + app: example-workload +spec: + selector: + matchLabels: + app: example-workload + template: + metadata: + namespace: spire + labels: + app: example-workload + spire-workload: example-workload + spec: + hostPID: true + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: example-workload + image: gcr.io/spiffe-io/spire-agent:0.12.0 + imagePullPolicy: IfNotPresent + command: ["/usr/bin/dumb-init", "/opt/spire/bin/spire-agent", "api", "watch"] + args: ["-socketPath", "/tmp/spire-agent/public/api.sock"] + volumeMounts: + - name: spire-agent-socket + mountPath: /tmp/spire-agent/public + readOnly: true + volumes: + - name: spire-agent-socket + hostPath: + path: /run/spire/agent-sockets + type: Directory diff --git a/k8s/k8s-workload-registrar/mode-reconcile/scripts/deploy-scenario.sh b/k8s/k8s-workload-registrar/mode-reconcile/scripts/deploy-scenario.sh new file mode 100755 index 0000000..998e16b --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-reconcile/scripts/deploy-scenario.sh @@ 
-0,0 +1,9 @@ +kubectl apply -f k8s/namespace.yaml +kubectl apply -f k8s/spire-server.yaml +kubectl rollout status deployment/spire-server -n spire + +kubectl apply -f k8s/spire-agent.yaml +kubectl rollout status daemonset/spire-agent -n spire + +kubectl apply -f k8s/workload.yaml +kubectl rollout status deployment/example-workload -n spire diff --git a/k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/admission-control.yaml b/k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/admission-control.yaml new file mode 100644 index 0000000..05480c2 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/admission-control.yaml @@ -0,0 +1,8 @@ +apiVersion: apiserver.k8s.io/v1alpha1 +kind: AdmissionConfiguration +plugins: +- name: ValidatingAdmissionWebhook + configuration: + apiVersion: apiserver.config.k8s.io/v1alpha1 + kind: WebhookAdmission + kubeConfigFile: /etc/kubernetes/pki/admctrl/kubeconfig.yaml diff --git a/k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/kubeconfig.yaml b/k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/kubeconfig.yaml new file mode 100644 index 0000000..72942c5 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/kubeconfig.yaml @@ -0,0 +1,9 @@ +# KubeConfig with client credentials for the API Server to use to call the +# K8S Workload Registrar service +apiVersion: v1 +kind: Config +users: +- name: k8s-workload-registrar.spire.svc + user: + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJ1VENDQVYrZ0F3SUJBZ0lJVVNIdmpGQTFxRHd3Q2dZSUtvWkl6ajBFQXdJd0pERWlNQ0FHQTFVRUF4TVoKU3poVElGZFBVa3RNVDBGRUlGSkZSMGxUVkZKQlVpQkRRVEFnRncweE9UQTFNVE14T1RFME1qTmFHQTg1T1RrNQpNVEl6TVRJek5UazFPVm93S0RFbU1DUUdBMVVFQXhNZFN6aFRJRmRQVWt0TVQwRkVJRkpGUjBsVFZGSkJVaUJEClRFbEZUbFF3V1RBVEJnY3Foa2pPUFFJQkJnZ3Foa2pPUFFNQkJ3TkNBQVM3SDIrMjJOcEFhTmVRdXQvZEYwdUYKMXk0VDVKTVdBczJOYm9NOXhZdlFKb1FXTVVNNERobWZQT1hVaE5STXdkb1JzTmhSdXZsYkROY2FEU29tNE1DYQpvM1V3Y3pBT0JnTlZIUThCQWY4RUJBTUNBNmd3RXdZRFZSMGxCQXd3Q2dZSUt3WUJCUVVIQXdJd0RBWURWUjBUCkFRSC9CQUl3QURBZEJnTlZIUTRFRmdRVW9EYlBiOUpWNXhqZlZVMnBhSzd2UUNsZ2d3SXdId1lEVlIwakJCZ3cKRm9BVW02eFNULzJCUzRYdmhVcXVzaDJCTEwwdlJNSXdDZ1lJS29aSXpqMEVBd0lEU0FBd1JRSWdHNzRQeWkyZQpONlBEcVRGRnY1UDFjNFhjVVdERzMwdzJIZEU4Wm8rMStVWUNJUURUL2xMa2dUUjUzV01INVRqWkllblhmYzFjCmxkMGlqSmpvRFJIR3lIRjJxdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + client-key-data: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ1BhSWtTTVowUmduQllWYncKMDIrdlN5UUpDM2RtZ0VDNFBLN2svTnk4Qnh1aFJBTkNBQVM3SDIrMjJOcEFhTmVRdXQvZEYwdUYxeTRUNUpNVwpBczJOYm9NOXhZdlFKb1FXTVVNNERobWZQT1hVaE5STXdkb1JzTmhSdXZsYkROY2FEU29tNE1DYQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/k8s/k8s-workload-registrar/mode-webhook/k8s/k8s-workload-registrar-secret.yaml b/k8s/k8s-workload-registrar/mode-webhook/k8s/k8s-workload-registrar-secret.yaml new file mode 100644 index 0000000..04e2e89 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/k8s-workload-registrar-secret.yaml @@ -0,0 +1,9 @@ +# Kubernetes Secret containing the K8S Workload Registrar server key +apiVersion: v1 +kind: Secret +metadata: + name: k8s-workload-registrar-secret + namespace: spire +type: Opaque +data: + server-key.pem: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ3RqS0h2ckVjVWJDdWtlUG8KaXJSMDRqSnZyWW1ONlF3cHlQSlFFTWtsZ3MraFJBTkNBQVJVdzRwSG1XQ3pyZmprWHNlbjkrbVNQemlmV1Y0MwpzNlNaMUorK3h2RFhNMmpPaE04NlZwL1JkQzBtMkZOajNXWWc2c3VSbEV6dmYvRncyQ3N1WmJtbwotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg== diff --git 
a/k8s/k8s-workload-registrar/mode-webhook/k8s/namespace.yaml b/k8s/k8s-workload-registrar/mode-webhook/k8s/namespace.yaml new file mode 100644 index 0000000..c6ba349 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: spire diff --git a/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-agent.yaml b/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-agent.yaml new file mode 100644 index 0000000..c283e33 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-agent.yaml @@ -0,0 +1,171 @@ +# ServiceAccount for the SPIRE agent +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-agent + namespace: spire + +--- + +# Required cluster role to allow spire-agent to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role +rules: +- apiGroups: [""] + resources: ["pods","nodes","nodes/proxy"] + verbs: ["get"] + +--- + +# Binds above cluster role to spire-agent service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role-binding +subjects: +- kind: ServiceAccount + name: spire-agent + namespace: spire +roleRef: + kind: ClusterRole + name: spire-agent-cluster-role + apiGroup: rbac.authorization.k8s.io + + +--- + +# ConfigMap for the SPIRE agent featuring: +# 1) PSAT node attestation +# 2) K8S Workload Attestation over the secure kubelet port +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-agent + namespace: spire +data: + agent.conf: | + agent { + data_dir = "/run/spire" + log_level = "DEBUG" + server_address = "spire-server" + server_port = "8081" + trust_bundle_path = "/run/spire/bundle/bundle.crt" + trust_domain = "example.org" + socket_path = "/tmp/spire-agent/public/api.sock" + } + + plugins { + NodeAttestor "k8s_psat" { + plugin_data { + cluster = "example-cluster" + } + } + + KeyManager "memory" { + plugin_data { + } + } + + WorkloadAttestor "k8s" { + plugin_data { + # Defaults to the secure kubelet port by default. + # Minikube does not have a cert in the cluster CA bundle that + # can authenticate the kubelet cert, so skip validation. + skip_kubelet_verification = true + } + } + } + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8080" + live_path = "/live" + ready_path = "/ready" + } + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: spire-agent + namespace: spire + labels: + app: spire-agent +spec: + selector: + matchLabels: + app: spire-agent + updateStrategy: + type: RollingUpdate + template: + metadata: + namespace: spire + labels: + app: spire-agent + spire-workload: agent + annotations: + spiffe.io/spiffe-id: "testing/agent" + spec: + # hostPID is required for K8S Workload Attestation. + hostPID: true + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: spire-agent + initContainers: + - name: init + # This is a small image with wait-for-it, choose whatever image + # you prefer that waits for a service to be up. 
This image is built + # from https://github.com/lqhl/wait-for-it + image: gcr.io/spiffe-io/wait-for-it + args: ["-t", "30", "spire-server:8081"] + containers: + - name: spire-agent + image: gcr.io/spiffe-io/spire-agent:0.12.0 + imagePullPolicy: IfNotPresent + args: ["-config", "/run/spire/config/agent.conf"] + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + - name: spire-bundle + mountPath: /run/spire/bundle + readOnly: true + - name: spire-agent-socket + mountPath: /tmp/spire-agent/public + readOnly: false + - name: spire-token + mountPath: /var/run/secrets/tokens + livenessProbe: + httpGet: + path: /live + port: 8080 + initialDelaySeconds: 10 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 10 + periodSeconds: 10 + volumes: + - name: spire-config + configMap: + name: spire-agent + - name: spire-bundle + configMap: + name: spire-bundle + - name: spire-agent-socket + hostPath: + path: /run/spire/agent-sockets + type: DirectoryOrCreate + - name: spire-token + projected: + sources: + - serviceAccountToken: + path: spire-agent + expirationSeconds: 7200 + audience: spire-server diff --git a/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-server.yaml b/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-server.yaml new file mode 100644 index 0000000..8dca6e5 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-server.yaml @@ -0,0 +1,334 @@ +# ServiceAccount used by the SPIRE server. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-server + namespace: spire + +--- + +# Required cluster role to allow spire-server to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-cluster-role +rules: +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + # allow TokenReview requests (to verify service account tokens for PSAT + # attestation) +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["get", "create"] + +--- + +# Binds above cluster role to spire-server service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-cluster-role-binding + namespace: spire +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +roleRef: + kind: ClusterRole + name: spire-server-cluster-role + apiGroup: rbac.authorization.k8s.io + +--- + +# Role for the SPIRE server +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: spire + name: spire-server-role +rules: + # allow "get" access to pods (to resolve selectors for PSAT attestation) +- apiGroups: [""] + resources: ["pods"] + verbs: ["get"] + # allow access to "get" and "patch" the spire-bundle ConfigMap (for SPIRE + # agent bootstrapping, see the spire-bundle ConfigMap below) +- apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["spire-bundle"] + verbs: ["get", "patch"] + +--- + +# RoleBinding granting the spire-server-role to the SPIRE server +# service account. +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-role-binding + namespace: spire +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +roleRef: + kind: Role + name: spire-server-role + apiGroup: rbac.authorization.k8s.io + +--- + +# ConfigMap containing the latest trust bundle for the trust domain. It is +# updated by SPIRE using the k8sbundle notifier plugin. 
SPIRE agents mount +# this config map and use the certificate to bootstrap trust with the SPIRE +# server during attestation. +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-bundle + namespace: spire + +--- + +# ConfigMap containing the SPIRE server configuration. +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-server + namespace: spire +data: + server.conf: | + server { + bind_address = "0.0.0.0" + bind_port = "8081" + trust_domain = "example.org" + data_dir = "/run/spire/data" + log_level = "DEBUG" + default_svid_ttl = "1h" + ca_ttl = "12h" + registration_uds_path = "/tmp/spire-server/private/api.sock" + ca_subject { + country = ["US"] + organization = ["SPIFFE"] + common_name = "" + } + } + + plugins { + DataStore "sql" { + plugin_data { + database_type = "sqlite3" + connection_string = "/run/spire/data/datastore.sqlite3" + } + } + + NodeAttestor "k8s_psat" { + plugin_data { + clusters = { + "example-cluster" = { + service_account_whitelist = ["spire:spire-agent"] + } + } + } + } + + KeyManager "disk" { + plugin_data { + keys_path = "/run/spire/data/keys.json" + } + } + + Notifier "k8sbundle" { + plugin_data { + # This plugin updates the bundle.crt value in the spire:spire-bundle + # ConfigMap by default, so no additional configuration is necessary. + } + } + } + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8080" + live_path = "/live" + ready_path = "/ready" + } + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8s-workload-registrar + namespace: spire +data: + k8s-workload-registrar.conf: | + trust_domain = "example.org" + server_socket_path = "/tmp/spire-server/private/api.sock" + cluster = "example-cluster" + mode = "webhook" + cert_path = "/run/spire/k8s-workload-registrar/certs/server-cert.pem" + key_path = "/run/spire/k8s-workload-registrar/secret/server-key.pem" + cacert_path = "/run/spire/k8s-workload-registrar/certs/cacert.pem" + +--- + +# ConfigMap containing the K8S Workload Registrar server certificate and +# CA bundle used to verify the client certificate presented by the API server. 
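+# Note: the PEM blocks below are sample credentials generated for this
+# tutorial; generate your own certificates for any real deployment.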
+# +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8s-workload-registrar-certs + namespace: spire +data: + server-cert.pem: | + -----BEGIN CERTIFICATE----- + MIIB5zCCAY6gAwIBAgIIQhiO2hfTsKQwCgYIKoZIzj0EAwIwJDEiMCAGA1UEAxMZ + SzhTIFdPUktMT0FEIFJFR0lTVFJBUiBDQTAgFw0xOTA1MTMxOTE0MjNaGA85OTk5 + MTIzMTIzNTk1OVowKDEmMCQGA1UEAxMdSzhTIFdPUktMT0FEIFJFR0lTVFJBUiBT + RVJWRVIwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAARUw4pHmWCzrfjkXsen9+mS + PzifWV43s6SZ1J++xvDXM2jOhM86Vp/RdC0m2FNj3WYg6suRlEzvf/Fw2CsuZbmo + o4GjMIGgMA4GA1UdDwEB/wQEAwIDqDATBgNVHSUEDDAKBggrBgEFBQcDATAMBgNV + HRMBAf8EAjAAMB0GA1UdDgQWBBS+rw+LUFZAT45Ia8SnrfdWOBtAAzAfBgNVHSME + GDAWgBSbrFJP/YFLhe+FSq6yHYEsvS9EwjArBgNVHREEJDAigiBrOHMtd29ya2xv + YWQtcmVnaXN0cmFyLnNwaXJlLnN2YzAKBggqhkjOPQQDAgNHADBEAiBSaDzjPws6 + Kt68mcJGAYBuWasdgdXJXeySzcnfieXe5AIgXwwaeq+deuF4+ckEY6WIzNWoIPOd + SDoLJWybQN17R0M= + -----END CERTIFICATE----- + + cacert.pem: | + -----BEGIN CERTIFICATE----- + MIIBgTCCASigAwIBAgIIVLxbHbQsZQMwCgYIKoZIzj0EAwIwJDEiMCAGA1UEAxMZ + SzhTIFdPUktMT0FEIFJFR0lTVFJBUiBDQTAgFw0xOTA1MTMxOTE0MjNaGA85OTk5 + MTIzMTIzNTk1OVowJDEiMCAGA1UEAxMZSzhTIFdPUktMT0FEIFJFR0lTVFJBUiBD + QTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJNq7IL77XWiWbohBOsmrCKMj+g3 + z/+U0c5HmXRj7lbSpjofS0Y1RkTHMEJSvAoMHzssCe5/MDMHX5Xnn4r/LSGjQjBA + MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSbrFJP + /YFLhe+FSq6yHYEsvS9EwjAKBggqhkjOPQQDAgNHADBEAiBaun9z1WGCSkjx4P+x + mhZkiu1HsOifT9SGQx3in48OSgIgJm02lvnuuKcO/YT2CGHqZ7QjGAnJQY6uLgEQ + 7CXLvcI= + -----END CERTIFICATE----- + +--- + +# This is the Deployment for the SPIRE server. It waits for SPIRE database to +# initialize and uses the SPIRE healthcheck command for liveness/readiness +# probes. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: spire-server + namespace: spire + labels: + app: spire-server +spec: + replicas: 1 + selector: + matchLabels: + app: spire-server + template: + metadata: + namespace: spire + labels: + app: spire-server + spec: + serviceAccountName: spire-server + shareProcessNamespace: true + containers: + - name: spire-server + image: gcr.io/spiffe-io/spire-server:0.12.0 + imagePullPolicy: IfNotPresent + args: ["-config", "/run/spire/config/server.conf"] + ports: + - containerPort: 8081 + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + - name: spire-server-socket + mountPath: /tmp/spire-server/private + readOnly: false + livenessProbe: + httpGet: + path: /live + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + - name: k8s-workload-registrar + image: gcr.io/spiffe-io/k8s-workload-registrar:0.12.0 + imagePullPolicy: IfNotPresent + args: ["-config", "/run/spire/k8s-workload-registrar/conf/k8s-workload-registrar.conf"] + ports: + - containerPort: 8443 + name: registrar-port + volumeMounts: + - name: spire-server-socket + mountPath: /tmp/spire-server/private + readOnly: true + - name: k8s-workload-registrar + mountPath: /run/spire/k8s-workload-registrar/conf + readOnly: true + - name: k8s-workload-registrar-certs + mountPath: /run/spire/k8s-workload-registrar/certs + readOnly: true + - name: k8s-workload-registrar-secret + mountPath: /run/spire/k8s-workload-registrar/secret + readOnly: true + volumes: + - name: spire-config + configMap: + name: spire-server + - name: spire-server-socket + hostPath: + path: /run/spire/server-sockets + type: DirectoryOrCreate + - name: k8s-workload-registrar + configMap: + name: k8s-workload-registrar + - name: 
k8s-workload-registrar-certs + configMap: + name: k8s-workload-registrar-certs + - name: k8s-workload-registrar-secret + secret: + secretName: k8s-workload-registrar-secret + +--- + +# Service definition for SPIRE server defining the gRPC port. +apiVersion: v1 +kind: Service +metadata: + name: spire-server + namespace: spire +spec: + type: NodePort + ports: + - name: grpc + port: 8081 + targetPort: 8081 + protocol: TCP + selector: + app: spire-server + +--- + +# Service definition for the admission webhook +apiVersion: v1 +kind: Service +metadata: + name: k8s-workload-registrar + namespace: spire +spec: + selector: + app: spire-server + ports: + - port: 443 + targetPort: registrar-port diff --git a/k8s/k8s-workload-registrar/mode-webhook/k8s/validation-webhook.yaml b/k8s/k8s-workload-registrar/mode-webhook/k8s/validation-webhook.yaml new file mode 100644 index 0000000..0c20483 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/validation-webhook.yaml @@ -0,0 +1,23 @@ +# Validating Webhook Configuration for the K8S Workload Registrar +# +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: k8s-workload-registrar-webhook +webhooks: + - name: k8s-workload-registrar.spire.svc + clientConfig: + service: + name: k8s-workload-registrar + namespace: spire + path: "/validate" + caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJnVENDQVNpZ0F3SUJBZ0lJVkx4YkhiUXNaUU13Q2dZSUtvWkl6ajBFQXdJd0pERWlNQ0FHQTFVRUF4TVoKU3poVElGZFBVa3RNVDBGRUlGSkZSMGxUVkZKQlVpQkRRVEFnRncweE9UQTFNVE14T1RFME1qTmFHQTg1T1RrNQpNVEl6TVRJek5UazFPVm93SkRFaU1DQUdBMVVFQXhNWlN6aFRJRmRQVWt0TVQwRkVJRkpGUjBsVFZGSkJVaUJEClFUQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJKTnE3SUw3N1hXaVdib2hCT3NtckNLTWorZzMKei8rVTBjNUhtWFJqN2xiU3Bqb2ZTMFkxUmtUSE1FSlN2QW9NSHpzc0NlNS9NRE1IWDVYbm40ci9MU0dqUWpCQQpNQTRHQTFVZER3RUIvd1FFQXdJQmhqQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU2JyRkpQCi9ZRkxoZStGU3E2eUhZRXN2UzlFd2pBS0JnZ3Foa2pPUFFRREFnTkhBREJFQWlCYXVuOXoxV0dDU2tqeDRQK3gKbWhaa2l1MUhzT2lmVDlTR1F4M2luNDhPU2dJZ0ptMDJsdm51dUtjTy9ZVDJDR0hxWjdRakdBbkpRWTZ1TGdFUQo3Q1hMdmNJPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + admissionReviewVersions: + - v1beta1 + rules: + - apiGroups: [""] + apiVersions: ["v1"] + operations: ["CREATE", "DELETE"] + resources: ["pods"] + scope: "Namespaced" + sideEffects: None diff --git a/k8s/k8s-workload-registrar/mode-webhook/k8s/workload.yaml b/k8s/k8s-workload-registrar/mode-webhook/k8s/workload.yaml new file mode 100644 index 0000000..9aad1c7 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/workload.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-workload + namespace: spire + labels: + app: example-workload +spec: + selector: + matchLabels: + app: example-workload + template: + metadata: + namespace: spire + labels: + app: example-workload + spec: + hostPID: true + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: example-workload + image: gcr.io/spiffe-io/spire-agent:0.12.0 + imagePullPolicy: IfNotPresent + command: ["/usr/bin/dumb-init", "/opt/spire/bin/spire-agent", "api", "watch"] + args: ["-socketPath", "/tmp/spire-agent/public/api.sock"] + volumeMounts: + - name: spire-agent-socket + mountPath: /tmp/spire-agent/public + readOnly: true + volumes: + - name: spire-agent-socket + hostPath: + path: /run/spire/agent-sockets + type: Directory diff --git a/k8s/k8s-workload-registrar/mode-webhook/scripts/deploy-scenario.sh b/k8s/k8s-workload-registrar/mode-webhook/scripts/deploy-scenario.sh 
new file mode 100755 index 0000000..fc49187 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-webhook/scripts/deploy-scenario.sh @@ -0,0 +1,11 @@ +kubectl apply -f k8s/namespace.yaml +kubectl apply -f k8s/k8s-workload-registrar-secret.yaml +kubectl apply -f k8s/spire-server.yaml +kubectl rollout status deployment/spire-server -n spire + +kubectl apply -f k8s/validation-webhook.yaml +kubectl apply -f k8s/spire-agent.yaml +kubectl rollout status daemonset/spire-agent -n spire + +kubectl apply -f k8s/workload.yaml +kubectl rollout status deployment/example-workload -n spire From 5b977857024b7c45e81cce2b2c92459d4033e21e Mon Sep 17 00:00:00 2001 From: Luciano Date: Mon, 17 May 2021 17:45:26 -0300 Subject: [PATCH 3/5] add tests and modify readme Signed-off-by: Luciano --- .travis.yml | 2 + k8s/k8s-workload-registrar/README.md | 200 ++++++++----- k8s/k8s-workload-registrar/create-cluster.sh | 3 - .../k8s-workload-registrar-cluster-role.yaml | 31 -- .../k8s/k8s-workload-registrar-configmap.yaml | 27 -- .../k8s-workload-registrar-statefulset.yaml | 107 ------- .../mode-crd/k8s/spire-agent.yaml | 81 ++--- .../mode-crd/k8s/spire-server.yaml | 277 +++++++++++------- .../mode-crd/scripts/deploy-scenario.sh | 18 +- .../mode-crd/scripts/test.sh | 55 ++++ .../mode-reconcile/k8s/spire-server.yaml | 19 +- .../mode-reconcile/scripts/deploy-scenario.sh | 15 +- .../mode-reconcile/scripts/test.sh | 55 ++++ .../mode-webhook/k8s/spire-agent.yaml | 1 - .../mode-webhook/k8s/spire-server.yaml | 3 +- .../{ => mode-webhook}/kind-config.yaml | 2 +- .../mode-webhook/scripts/create-cluster.sh | 7 + .../mode-webhook/scripts/deploy-scenario.sh | 19 +- .../mode-webhook/scripts/test.sh | 55 ++++ k8s/k8s-workload-registrar/test.sh | 31 ++ 20 files changed, 584 insertions(+), 424 deletions(-) delete mode 100755 k8s/k8s-workload-registrar/create-cluster.sh delete mode 100644 k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-cluster-role.yaml delete mode 100644 k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-configmap.yaml delete mode 100644 k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-statefulset.yaml mode change 100644 => 100755 k8s/k8s-workload-registrar/mode-crd/scripts/deploy-scenario.sh create mode 100755 k8s/k8s-workload-registrar/mode-crd/scripts/test.sh create mode 100755 k8s/k8s-workload-registrar/mode-reconcile/scripts/test.sh rename k8s/k8s-workload-registrar/{ => mode-webhook}/kind-config.yaml (93%) create mode 100755 k8s/k8s-workload-registrar/mode-webhook/scripts/create-cluster.sh create mode 100755 k8s/k8s-workload-registrar/mode-webhook/scripts/test.sh create mode 100755 k8s/k8s-workload-registrar/test.sh diff --git a/.travis.yml b/.travis.yml index f7a9815..1041cf9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -25,6 +25,8 @@ before_script: - minikube update-context # Wait for Kubernetes to be up and ready. 
- JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'; until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do sleep 1; done + # Download kind + - curl -Lo kind https://kind.sigs.k8s.io/dl/v0.10.0/kind-linux-amd64 && chmod +x kind && sudo mv kind /usr/local/bin/ script: - ./k8s/test-all.sh diff --git a/k8s/k8s-workload-registrar/README.md b/k8s/k8s-workload-registrar/README.md index 4898a51..b8d2b70 100644 --- a/k8s/k8s-workload-registrar/README.md +++ b/k8s/k8s-workload-registrar/README.md @@ -1,5 +1,5 @@ # Configure SPIRE to use the Kubernetes Workload Registrar - This tutorial builds on the [Kubernetes Quickstart Tutorial](../quickstart/) to provide an example of how to configure the SPIRE Kubernetes Workload Registrar as a container within the SPIRE Server pod. The registrar enables automatic workload registration and management in SPIRE Kubernetes implementations. The changes required to deploy the registrar and the necessary files are shown as a delta to the quickstart tutorial, so it is highly encouraged to execute, or at least read through, the Kubernetes Quickstart Tutorial first. + This tutorial provides an example of how to configure the SPIRE Kubernetes Workload Registrar as a container within the SPIRE Server pod using a local cluster deployed with [kind](https://kind.sigs.k8s.io/). The registrar enables automatic workload registration and management in SPIRE Kubernetes implementations. It is highly encouraged to execute, or at least read through, the [Kubernetes Quickstart Tutorial](../quickstart/) to fully understand this tutorial as a similar deployment is used here. This tutorial demonstrates how to use the registrar's three different modes: @@ -19,17 +19,20 @@ For documentation about SPIRE Kubernetes Workload Registrar configuration option # Prerequisites Before proceeding, review the following list: - * You'll need access to the Kubernetes environment configured when going through the [Kubernetes Quickstart Tutorial](../quickstart/). - * Required configuration files for this tutorial can be found in the `k8s/k8s-workload-registrar` directory in [https://github.com/spiffe/spire-tutorials](https://github.com/spiffe/spire-tutorials). If you didn't already clone the repo for the _Kubernetes Quickstart Tutorial_, please do so now. + * It is recommended to go through the [Kubernetes Quickstart Tutorial](../quickstart/) before proceeding with the steps described in this guide. + * Required configuration files for this tutorial can be found in the `k8s/k8s-workload-registrar` directory in [https://github.com/spiffe/spire-tutorials](https://github.com/spiffe/spire-tutorials). * The steps in this document should work with Kubernetes version 1.20.2. We will deploy an scenario that consists of a StatefulSet containing a SPIRE Server and the Kubernetes Workload Registrar, a SPIRE Agent, and a workload, and configure the different modes to illustrate the automatic registration entries creation. # Common configuration -The SPIRE Server and the Kubernetes Workload registrar will communicate each other using a socket, that will be mounted at the `/tmp/spire-server/private` directory, as we can see from the `volumeMounts` section of both containers. The only difference between these sections is that, for the registrar, the socket will have the `readOnly` option set to `false`, while for the SPIRE Server container it will have its value set to `true`. Below, this section is shown for the registrar container. 
+Below, we describe parts of the configurations that are common to the three modes. + +The SPIRE Server and the Kubernetes Workload registrar will communicate with each other using a socket that will be mounted at the `/tmp/spire-server/private` directory, as we can see from the `volumeMounts` section of both containers. The only difference between these sections is that, for the registrar, the socket will have the `readOnly` option set to `true`, while for the SPIRE Server container it will have its value set to `false`. The registrar container's section, which illustrates what we described earlier, is shown below: ``` +volumeMounts: - name: spire-server-socket mountPath: /tmp/spire-server/private readOnly: true @@ -37,6 +40,8 @@ The SPIRE Server and the Kubernetes Workload registrar will communicate each oth # Webhook mode (default) +In this section we will review the important files needed to configure Webhook mode. + This mode makes use of the `ValidatingWebhookConfiguration` feature from Kubernetes, which is called by the Kubernetes API server every time a new pod is created or deleted in the cluster, as we can see from the rules of the resource below: ``` @@ -62,7 +67,7 @@ webhooks: scope: "Namespaced" ``` -This webhook itself authenticates the API server, and for this reason we provide a CA bundle, with the `caBundle` option, as we can see in the stanza above (value ommited for brevity). This authentication must be done to ensure that it is the API server who is contacting the webhook, because this situation will lead to registration entries creation or deletion on the SPIRE Server, something that is a key point in the SPIRE infrastructure. +This webhook itself authenticates the API server, and for this reason we provide a CA bundle, with the `caBundle` option, as we can see in the stanza above (value omitted for brevity). This authentication must be done to ensure that it is the API server who is contacting the webhook, because these calls lead to the creation or deletion of registration entries on the SPIRE Server, something that is a key point in the SPIRE infrastructure and should be tightly controlled. Also, a secret is volume mounted in the `/run/spire/k8s-workload-registrar/secret` directory inside the SPIRE Server container, containing the K8S Workload Registrar server key. We can see this in the `volumeMounts` section of the SPIRE Server statefulset configuration file: @@ -95,7 +100,7 @@ Another configuration that is relevant in this mode is the registrar certificate readOnly: true ``` -The certificates for the CA and for the server are stored in a `ConfigMap`: +These certificates are stored in a `ConfigMap`: ``` apiVersion: v1 @@ -152,7 +157,7 @@ users: client-key-data: ... ``` -To be mounted, an `AdmissionConfiguration` describes where the API server can locate the file containing the `KubeConfig` entry. This file is passed to the API server via the `--extra-config=apiserver.admission-control-config-file` flag. +An `AdmissionConfiguration` is mounted inside the node too, and it describes where the API server can locate the file containing the `KubeConfig` entry used in the authentication process. ``` apiVersion: apiserver.k8s.io/v1alpha1 @@ -165,19 +170,51 @@ plugins: kubeConfigFile: /etc/kubernetes/pki/admctrl/kubeconfig.yaml ``` +To mount the two files into the node, we will use kind's special `extraMounts` option, which allows us to pass files between the host and a kind node.
+ +``` +nodes: +- role: control-plane + extraMounts: + - containerPath: /etc/kubernetes/pki/admctrl + hostPath: WEBHOOKDIR/k8s/admctrl +``` + +The ```WEBHOOKDIR/k8s/admctrl``` variable points to the host path of the folder where the files are stored, and `containerPath` specifies the directory in which the files will be mounted. + +In kind's specific configuration file (`kind-config.yaml`), we use the `admission-control-config-file` option to tell the API server where to find the admission configuration. Note that this value matches the one set in `containerPath`. + +``` +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +kubeadmConfigPatches: +- | + apiVersion: kubeadm.k8s.io/v1beta2 + kind: ClusterConfiguration + metadata: + name: config + apiServer: + extraArgs: + "service-account-signing-key-file": "/etc/kubernetes/pki/sa.key" + "service-account-issuer": "api" + "service-account-api-audiences": "api,spire-server" + "admission-control-config-file": "/etc/kubernetes/pki/admctrl/admission-control.yaml" +... +``` + We have looked at the key points of the webhook mode's configuration, so let's apply the necessary files to set our scenario with a SPIRE Server with the registrar container in it, an Agent, and a workload, by issuing the following command in the `mode-webhook` directory: ```console $ bash scripts/deploy-scenario.sh ``` -This is all we need to have the registration entries created on the server. We will start a shell into the SPIRE Server container and run the entry show directive by executing the command below: +This will create a new Kubernetes cluster for us, and apply the necessary files for the scenario to work. This is all we need to have the registration entries created on the server. We will run the server's entry show command to see the created registration entries, by executing the command below: ```console -$ kubectl exec -it deployment/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" +$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock ``` -You should see the following 3 registration entries, corresponding to the node, the agent, and the workload. +You should see the following 3 registration entries, corresponding to the node, the agent, and the workload (the order of the results in your output may differ). ```console Found 3 entries @@ -207,19 +244,19 @@ Selector : k8s:pod-name:example-workload-6877cd47d5-2fmpq We omitted the entry IDs, as they may change with every run. Let's see how the other fields are built: -The cluster name *example-cluster* is used as Parent ID in all the entries, and there is no reference to the node that the pods belong to, this is, all the registration entries are mapped to a single node entry inside the cluster. This represents a drawback for this mode, as all the nodes in the cluster have permission to get identities for all the workloads that belong to the Kubernetes cluster, which increases the blast radius in case of a node being compromised, among other disadvantages. +The cluster name *example-cluster* is used as Parent ID for the entries that correspond to the agent and the workload, but there is no reference to the node that these pods belong to; that is, the registration entries are mapped to a single node entry inside the cluster.
This represents a drawback for this mode, as all the nodes in the cluster have permission to get identities for all the workloads that belong to the Kubernetes cluster, which increases the blast radius in case of a node being compromised, among other disadvantages. Taking a look at the assigned SPIFFE IDs for the agent and the workload, we can see that they have the following form: *spiffe://\<trust-domain\>/ns/\<namespace\>/sa/\<service-account\>*. From this, we can conclude that we are using the registrar configured with the Service Account Based workload registration (which is the default behaviour). For instance, as the workload uses the *default* service account, in the *spire* namespace, its SPIFFE ID is: *spiffe://example.org/ns/spire/sa/default* -Another thing that is worth looking, is the registrar log, in which we will found out if the entries were created by this container. Run the following command to get the logs of the registrar, and to look for the *Created pod entry* keyword. +Another thing worth examining is the registrar log, where we can find out whether the entries were created by this container. Run the following command to get the logs of the registrar and look for the *Created pod entry* keyword. ```console -kubectl logs deployment/spire-server -n spire -c k8s-workload-registrar | grep "Created pod entry" +$ kubectl logs statefulset/spire-server -n spire -c k8s-workload-registrar | grep "Created pod entry" ``` -From the output of this command, we can conclude that the 3 entries that were present on the SPIRE Server were created by the registrar, and correspond to the node, agent, and workload, in that specific order. +The output of this command includes 3 lines, one for every entry created, and we can conclude that the 3 entries that were present on the SPIRE Server were created by the registrar. They correspond to the node, agent, and workload, in that specific order. ## Pod deletion @@ -232,7 +269,7 @@ $ kubectl delete deployment/example-workload -n spire Again, check for the registration entries with the command below: ```console -$ kubectl exec -it deployment/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" +$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock ``` The output of the command will not include the registration entry that corresponds to the workload, because the pod was deleted, and should be similar to: ```console Found 2 entries @@ -255,13 +292,13 @@ Selector : k8s:ns:spire Selector : k8s:pod-name:spire-agent-wtx7b ``` -As the pod was deleted, we will check the registrar logs, looking for the "Deleted pod entry" keyword, with the command shown below: +We will check the registrar logs to find out if the registrar deleted the entry, looking for the "Deleting pod entries" keyword, with the command shown below: ```console -$ kubectl logs deployment/spire-server -n spire -c k8s-workload-registrar | grep "Deleting pod entries" +$ kubectl logs statefulset/spire-server -n spire -c k8s-workload-registrar | grep "Deleting pod entries" ``` -From which we can conclude that the registrar successfuly deleted the corresponding entry of the *example-workload* pod. +The registrar successfully deleted the corresponding entry for the *example-workload* pod. ## Teardown @@ -304,13 +341,14 @@ $ bash scripts/deploy-scenario.sh With the Reconcile scenario set, we will check the registration entries and some special considerations for this mode.
Let's issue the command below to show the existing registration entries. ```console -$ kubectl exec -it deployment/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" +$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock ``` Your output should be similar to the following, showing the entries for the node, the agent, and the workload: ```console -Entry ID : 84bb478d-2ec7-448c-86f6-51c8970c60ab +Found 3 entries +Entry ID : ... SPIFFE ID : spiffe://example.org/spire-k8s-registrar/example-cluster/node/example-cluster-control-plane Parent ID : spiffe://example.org/spire/server Revision : 0 @@ -318,8 +356,7 @@ TTL : default Selector : k8s_psat:agent_node_name:example-cluster-control-plane Selector : k8s_psat:cluster:example-cluster -Found 3 entries -Entry ID : 5ea1a895-d144-49fe-9d58-bbc7ad903bee +Entry ID : ... SPIFFE ID : spiffe://example.org/agent Parent ID : spiffe://example.org/spire-k8s-registrar/example-cluster/node/example-cluster-control-plane Revision : 0 @@ -327,7 +364,7 @@ TTL : default Selector : k8s:ns:spire Selector : k8s:pod-name:spire-agent-c5c5f -Entry ID : f9606cee-0773-4228-8440-ea2bac9ca3ed +Entry ID : ... SPIFFE ID : spiffe://example.org/example-workload Parent ID : spiffe://example.org/spire-k8s-registrar/example-cluster/node/example-cluster-control-plane Revision : 0 @@ -336,14 +373,14 @@ Selector : k8s:ns:spire Selector : k8s:pod-name:example-workload-b98cc787d-kzxz6 ``` -If we compare this entries to the Webhook mode ones, the difference is that the Parent ID of the SVID contains a reference to the node name where the pod is scheduled on. We mentioned that this is not happening using the Webhook node, and this was one of its principal drawbacks. Also, for the node registration entry (the one that has the SPIRE Server SPIFFE ID as the Parent ID), node name is used in the selectors, along with the cluster name. For the remaining two entries, pod name and namespace are used in the selectors instead. +If we compare these entries to those created using Webhook mode, the difference is that the Parent ID of the SVID contains a reference to the node name on which the pod is scheduled, in this case, `example-cluster-control-plane`. We mentioned that this doesn't happen using Webhook mode, and this was one of its principal drawbacks. Also, for the node registration entry (the one that has the SPIRE Server SPIFFE ID as the Parent ID), the node name is used in the selectors, along with the cluster name. For the remaining two entries, pod name and namespace are used in the selectors instead. As we are using Label workload registration mode, the SPIFFE IDs for the agent and the workload (which are labeled as we mentioned before) have the form: *spiffe://\<trust-domain\>/\<label-value\>*. For example, as the agent has the label value equal to `agent`, it has the following SPIFFE ID: *spiffe://example.org/agent*. Let's check if the registrar indeed created the registration entries by checking its logs, looking for the *Created new spire entry* keyword.
Run the command that is shown below: ```console -$ kubectl logs deployment/spire-server -n spire -c k8s-workload-registrar | grep "Created new spire entry" +$ kubectl logs statefulset/spire-server -n spire -c k8s-workload-registrar | grep "Created new spire entry" ``` We mentioned before that there were two reconciling controllers, and from the output of the command above, we can see that the node controller created the entry for the single node in the cluster, and that the pod controller created the entries for the two labeled pods: agent and workload. @@ -359,14 +396,14 @@ $ kubectl delete deployment/example-workload -n spire We will check if its corresponding entry is deleted too. Run the following command to see the registration entries on the SPIRE Server: ```console -$ kubectl exec -it deployment/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" +$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock ``` The output will only show two registration entries, because the workload entry was deleted by the registrar: ```console Found 2 entries -Entry ID : 5ea1a895-d144-49fe-9d58-bbc7ad903bee +Entry ID : ... SPIFFE ID : spiffe://example.org/agent Parent ID : spiffe://example.org/spire-k8s-registrar/example-cluster/node/example-cluster-control-plane Revision : 0 @@ -374,7 +411,7 @@ TTL : default Selector : k8s:ns:spire Selector : k8s:pod-name:spire-agent-c5c5f -Entry ID : 84bb478d-2ec7-448c-86f6-51c8970c60ab +Entry ID : ... SPIFFE ID : spiffe://example.org/spire-k8s-registrar/example-cluster/node/example-cluster-control-plane Parent ID : spiffe://example.org/spire/server Revision : 0 @@ -386,7 +423,7 @@ Selector : k8s_psat:cluster:example-cluster If we look for the *Deleted entry* keyword on the registrar logs, we will find out that the registrar deleted the entry. Issue the following command: ```console -$ kubectl logs deployment/spire-server -n spire -c k8s-workload-registrar | grep "Deleted entry" +$ kubectl logs statefulset/spire-server -n spire -c k8s-workload-registrar | grep "Deleted entry" ``` The pod controller successfully deleted the entry. @@ -402,10 +439,10 @@ $ kubectl apply -f k8s/not-labeled-workload.yaml Let's see the existing registration entries with the command: ```console -$ kubectl exec -it deployment/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" +$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock ``` -The output should remain constant, compared to the one that we obtained in the *Pod deletion* section. This is, the only two registration entries on the SPIRE Server corresponds to the labeled deployed resources. This is the expected behaviour, as only labeled pods will be considered by the workload registrar while using the Label Workload registration mode. +The output should remain constant compared to the one that we obtained in the *Pod deletion* section. This implies that the only two registration entries on the SPIRE Server correspond to the labeled deployed resources. This is the expected behaviour, as only labeled pods will be considered by the workload registrar while using the Label Workload registration mode.
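For reference, a pod opts in to Label workload registration simply by carrying the label configured in `pod_label` (`spire-workload` in this scenario); the label's value becomes the SPIFFE ID path. A minimal sketch of such a pod follows (the pod name, image, and label value here are illustrative and not taken from this scenario's files):

```
apiVersion: v1
kind: Pod
metadata:
  name: labeled-workload
  namespace: spire
  labels:
    # The label value becomes the SPIFFE ID path, e.g.
    # spiffe://example.org/labeled-workload (illustrative)
    spire-workload: labeled-workload
spec:
  containers:
  - name: main
    image: busybox
    command: ["sleep", "3600"]
```

Applying a manifest like this would cause the registrar's pod controller to create a matching registration entry, just as it did for the agent and the workload above.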
## Teardown @@ -450,25 +487,26 @@ data: cluster = "example-cluster" mode = "crd" pod_annotation = "spiffe.io/spiffe-id" + metrics_bind_addr = "0" ``` -Let's deploy the necessary files, including the base scenario plus the SPIFFE ID CRD definition, and examine the automatically created registration entries. +Let's deploy the necessary files, including the base scenario plus the SPIFFE ID CRD definition, and examine the automatically created registration entries. Ensure that your working directory is `mode-crd`, and run: ```console $ bash scripts/deploy-scenario.sh ``` -Start a shell into the SPIRE Server and run the entry show command by executing: +Run the entry show command by executing: ```console -$ kubectl exec -it deployment/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" +$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock ``` The output should show the following registration entries: ```console Found 3 entries -Entry ID : cc0e9405-939d-4b45-96d7-e160c89cf6f5 +Entry ID : ... SPIFFE ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane Parent ID : spiffe://example.org/spire/server Revision : 1 @@ -476,7 +514,7 @@ TTL : default Selector : k8s_psat:agent_node_uid:08990bfd-3551-4761-8a1b-2e652984ffdd Selector : k8s_psat:cluster:example-cluster -Entry ID : b9b3b92b-06a2-4619-8fca-31ed1ea0138d +Entry ID : ... SPIFFE ID : spiffe://example.org/testing/agent Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane Revision : 1 @@ -486,7 +524,7 @@ Selector : k8s:ns:spire Selector : k8s:pod-uid:538886bb-48e1-4795-b386-10e97f50e34f DNS name : spire-agent-jzc8w -Entry ID : 561cc364-35fd-426e-9de6-e5db0605d1a1 +Entry ID : ... SPIFFE ID : spiffe://example.org/testing/example-workload Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane Revision : 1 @@ -497,15 +535,38 @@ Selector : k8s:pod-uid:78ed3fc5-4cff-476a-90f5-37d3abd47823 DNS name : example-workload-6877cd47d5-l4hv5 ``` -3 entries were created corresponding to the node, agent, and workload. For the node entry (the one that has the SPIRE Server SPIFFE ID as Parent ID), we see a difference in the selectors, comparing it with the selectors in the node entry created using Reconcile mode: we find out that instead of placing the node name, CRD mode stores the UID of the node where the agent is running on. As the node name is used in the SPIFFE ID assigned to the node, we can take this as a mapping from node UID to node name. +3 entries were created corresponding to the node, agent, and workload. For the node entry (the one that has the SPIRE Server SPIFFE ID as Parent ID), we see a difference in the selectors, comparing it with the selectors in the node entry created using Reconcile mode for the same pod: we see that instead of using the node name, CRD mode stores the UID of the node where the agent is running. As the node name is used in the SPIFFE ID assigned to the node, we can take this as a mapping from node UID to node name. Something similar happens with the pod entries, but this time it is the UID of the pod where the workload is running that is stored in the selectors, instead of the node UID.
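If you want to cross-check these UID-based selectors, both values can be read straight from the Kubernetes API; the following commands are a quick sanity check (the UIDs will of course differ in every cluster):

```console
$ kubectl get node example-cluster-control-plane -o jsonpath='{.metadata.uid}'
$ kubectl get pods -n spire -o custom-columns=NAME:.metadata.name,UID:.metadata.uid
```

The returned values should match the `k8s_psat:agent_node_uid` and `k8s:pod-uid` selectors shown in the entries above.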
If we now focus our attention on the SPIFFE IDs assigned to the workloads, we see that they take the form of *spiffe://\<trust-domain\>/\<annotation-value\>*. By using Annotation Based workload registration, it is possible to freely set the SPIFFE ID path. In this case, for the workload, we set the annotation value to *example-workload*. +Obtain the registrar logs by issuing: + +```console +$ kubectl logs statefulset/spire-server -n spire -c k8s-workload-registrar | grep "Created entry" +``` + +This will show that the registrar created the 3 entries on the SPIRE Server. + +In addition to the SPIRE entries, the registrar in this mode is configured to create the corresponding custom resources. Let's check for this using a Kubernetes native command such as: + +```console +$ kubectl get spiffeids -n spire +``` + +This command will show the custom resources for each one of the pods: + +```console +NAME AGE +example-cluster-control-plane 24m +example-workload-5bffcd75d-stl5w 24m +spire-agent-r86rz 24m +``` + ## Pod deletion -As in the previous modes, if we delete the workload deployment, we will see that its corresponding registration entry will be deleted too. Let's run the command to delete the workload pod: +As in the previous modes, if we delete the workload deployment, we will see that its corresponding registration entry will be deleted too. Let's check it by running the command to delete the workload pod: ```console $ kubectl delete deployment/example-workload -n spire @@ -514,14 +575,14 @@ $ kubectl delete deployment/example-workload -n spire And now, check the registration entries in the SPIRE Server by executing: ```console -$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" +$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock ``` The output should look like: ```console Found 2 entries -Entry ID : cc0e9405-939d-4b45-96d7-e160c89cf6f5 +Entry ID : ... SPIFFE ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane Parent ID : spiffe://example.org/spire/server Revision : 1 @@ -529,7 +590,7 @@ TTL : default Selector : k8s_psat:agent_node_uid:08990bfd-3551-4761-8a1b-2e652984ffdd Selector : k8s_psat:cluster:example-cluster -Entry ID : b9b3b92b-06a2-4619-8fca-31ed1ea0138d +Entry ID : ... SPIFFE ID : spiffe://example.org/testing/agent Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane Revision : 1 @@ -540,11 +601,23 @@ Selector : k8s:pod-uid:538886bb-48e1-4795-b386-10e97f50e34f DNS name : spire-agent-jzc8w ``` -The only entries that should exist now are the ones that match the node and the SPIRE agent, because the workload one was deleted by the registrar. +The only entries that should exist now are the ones that match the node and the SPIRE agent, because the workload entry was deleted by the registrar. We can verify this by examining the registrar logs, this time looking for the keyword "Deleted entry":
+ +```console +$ kubectl logs statefulset/spire-server -n spire -c k8s-workload-registrar | grep -A 1 "Deleted entry" +``` + +As the registrar handles the custom resources automatically, it also deleted the corresponding SPIFFE ID CRD, something that we can also check by querying the Kubernetes control plane (```kubectl get spiffeids -n spire```), which will output the following: + +```console +NAME AGE +example-cluster-control-plane 41m +spire-agent-r86rz 40m +``` ## Non-annotated pods -Let's check if a pod that has no annotations its considered by the registrar. Ensure that your working directory is `mode-crd` and deploy a new workload with this condition with the following command: +Let's check if a pod that has no annotations is considered by the registrar. Deploy a new workload with this condition with the following command: ```console $ kubectl apply -f k8s/not-annotated-workload.yaml @@ -553,51 +626,41 @@ $ kubectl apply -f k8s/not-annotated-workload.yaml As in the previous section, let's see the registration entries that are present in the SPIRE Server: ```console -$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" +$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock ``` The result of the command should be equal to the one shown in the *Pod deletion* section, because no new entry has been created, as expected. ## SPIFFE ID CRD creation -One of the benefits of using CRD Mode is that we can manipulate the SPIFFE IDs as if they were resources inside Kubernetes environment, in other words using the *kubectl* command. +One of the benefits of using the CRD Mode is that we can manipulate the SPIFFE IDs as if they were resources inside the Kubernetes environment, in other words, using the *kubectl* command. -If we check for SPIFFE IDs resources (using *kubectl get spiffeids -n spire*), we'll obtain something like the following: +Let's create a new SPIFFE ID CRD by using: ```console -NAME AGE -example-cluster-control-plane 11m -spire-agent-jzc8w 11m -``` - -From this, we can see that there are two already created custom resources, corresponding to the three entries that we obtained when the scenario was deployed, minus the one for the annotated workload, whose pod was deleted in the *Pod deletion* section. - -From the `mode-crd` folder, let's create a new SPIFFE ID CRD by using: - -```console -# kubectl apply -f k8s/test_spiffeid.yaml +$ kubectl apply -f k8s/test_spiffeid.yaml ``` -We will check if it was created, executing the *kubectl get spiffeids -n spire* command, whose output will show 3 custom resources: +We will check if it was created, consulting the custom resources with ```kubectl get spiffeids -n spire```, whose output will show the following: ```console NAME AGE -example-cluster-control-plane 19m +example-cluster-control-plane 45m my-test-spiffeid 19s -spire-agent-jzc8w 18m +spire-agent-r86rz 45m ``` The resource was successfully created, but did it have any impact on the SPIRE Server?
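Before answering that, it helps to recall what the applied resource roughly looks like. A minimal SpiffeID manifest along these lines would produce the entry we are about to see (this is a sketch; the actual contents of `k8s/test_spiffeid.yaml` may differ slightly):

```
apiVersion: spiffeid.spiffe.io/v1beta1
kind: SpiffeID
metadata:
  name: my-test-spiffeid
  namespace: spire
spec:
  # The SPIFFE ID and selectors below mirror the registration entry
  # that shows up on the SPIRE Server after the apply.
  spiffeId: spiffe://example.org/test
  parentId: spiffe://example.org/spire/server
  selector:
    namespace: spire
    podName: my-test-pod
```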
Let's execute the command below to see the registration entries: ```console -$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" +$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock ``` You'll get an output similar to this: ```console Found 3 entries -Entry ID : cc0e9405-939d-4b45-96d7-e160c89cf6f5 +Entry ID : ... SPIFFE ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane Parent ID : spiffe://example.org/spire/server Revision : 1 @@ -605,7 +668,7 @@ TTL : default Selector : k8s_psat:agent_node_uid:08990bfd-3551-4761-8a1b-2e652984ffdd Selector : k8s_psat:cluster:example-cluster -Entry ID : 0524528a-ca5b-452b-b5f9-7e9cb5652446 +Entry ID : ... SPIFFE ID : spiffe://example.org/test Parent ID : spiffe://example.org/spire/server Revision : 1 @@ -613,7 +676,7 @@ TTL : default Selector : k8s:ns:spire Selector : k8s:pod-name:my-test-pod -Entry ID : b9b3b92b-06a2-4619-8fca-31ed1ea0138d +Entry ID : ... SPIFFE ID : spiffe://example.org/testing/agent Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane Revision : 1 @@ -639,16 +702,15 @@ $ kubectl delete spiffeid/my-test-spiffeid -n spire Now, we will check the registration entries: ```console -$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock" +$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock ``` -The output from this command should include only the entries for the node and the agent, because the recently created SPIFFE ID CRD was deleted. +The output from this command should include only the entries for the node and the agent, because the recently created SPIFFE ID CRD was deleted, along with its corresponding registration entry.
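As a final check, listing the custom resources again with ```kubectl get spiffeids -n spire``` should show only the two remaining resources; expect output along these lines (the ages will differ):

```console
NAME                            AGE
example-cluster-control-plane   50m
spire-agent-r86rz               50m
```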
## Teardown - To delete the resources used for this mode, we will delete the cluster by executing: ```console -kind delete cluster --name example-cluster +$ kind delete cluster --name example-cluster ``` diff --git a/k8s/k8s-workload-registrar/create-cluster.sh b/k8s/k8s-workload-registrar/create-cluster.sh deleted file mode 100755 index 7f9ad10..0000000 --- a/k8s/k8s-workload-registrar/create-cluster.sh +++ /dev/null @@ -1,3 +0,0 @@ -sed -i.bak "s#K8SDIR#${PWD}/mode-webhook/k8s#g" kind-config.yaml -rm kind-config.yaml.bak -kind create cluster --name example-cluster --config kind-config.yaml diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-cluster-role.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-cluster-role.yaml deleted file mode 100644 index 6efd997..0000000 --- a/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-cluster-role.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: k8s-workload-registrar-role -rules: -- apiGroups: [""] - resources: ["endpoints", "nodes", "pods"] - verbs: ["get", "list", "watch"] -- apiGroups: ["spiffeid.spiffe.io"] - resources: ["spiffeids"] - verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] -- apiGroups: ["spiffeid.spiffe.io"] - resources: ["spiffeids/status"] - verbs: ["get", "patch", "update"] -- apiGroups: ["admissionregistration.k8s.io"] - resources: ["validatingwebhookconfigurations"] - verbs: ["get", "list", "update", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: k8s-workload-registrar-role-binding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: k8s-workload-registrar-role -subjects: -- kind: ServiceAccount - name: spire-server - namespace: spire diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-configmap.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-configmap.yaml deleted file mode 100644 index 1a17c79..0000000 --- a/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-configmap.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: k8s-workload-registrar - namespace: spire -data: - k8s-workload-registrar.conf: | - trust_domain = "example.org" - server_socket_path = "/tmp/spire-server/private/api.sock" - cluster = "example-cluster" - mode = "crd" - pod_annotation = "spiffe.io/spiffe-id" - - -# apiVersion: v1 -# kind: ConfigMap -# metadata: -# name: k8s-workload-registrar -# namespace: spire -# data: -# k8s-workload-registrar.conf: | -# trust_domain = "example.org" -# server_socket_path = "/tmp/spire-server/private/api.sock" -# cluster = "k8stest" -# mode = "reconcile" -# pod_label = "spire-workload" -# metrics_addr = "0" diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-statefulset.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-statefulset.yaml deleted file mode 100644 index 5220024..0000000 --- a/k8s/k8s-workload-registrar/mode-crd/k8s/k8s-workload-registrar-statefulset.yaml +++ /dev/null @@ -1,107 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: spire-server - namespace: spire - labels: - app: spire-server -spec: - replicas: 1 - selector: - matchLabels: - app: spire-server - serviceName: spire-server - template: - metadata: - namespace: spire - labels: - app: spire-server - spec: - serviceAccountName: spire-server - shareProcessNamespace: true - containers: 
- - name: spire-server - image: gcr.io/spiffe-io/spire-server:0.12.0 - args: - - -config - - /run/spire/config/server.conf - livenessProbe: - exec: - command: ["/opt/spire/bin/spire-server", "healthcheck", "-registrationUDSPath", "/tmp/spire-server/private/api.sock"] - failureThreshold: 2 - initialDelaySeconds: 15 - periodSeconds: 60 - timeoutSeconds: 3 - readinessProbe: - exec: - command: ["/opt/spire/bin/spire-server", "healthcheck", "--shallow", "-registrationUDSPath", "/tmp/spire-server/private/api.sock"] - initialDelaySeconds: 5 - periodSeconds: 5 - ports: - - containerPort: 8081 - volumeMounts: - - name: spire-config - mountPath: /run/spire/config - readOnly: true - - name: spire-secrets - mountPath: /run/spire/secrets - readOnly: true - - name: spire-data - mountPath: /run/spire/data - readOnly: false - - name: spire-registration-socket - mountPath: /tmp/spire-server/private - readOnly: false - - name: k8s-workload-registrar - # image: fmemon/k8s-workload-registrar:latest - image: gcr.io/spiffe-io/k8s-workload-registrar:0.12.0 - args: - - -config - - /run/spire/config/k8s-workload-registrar.conf - # env: - # - name: MY_POD_UID - # valueFrom: - # fieldRef: - # fieldPath: metadata.uid - ports: - - containerPort: 9443 - name: webhook - protocol: TCP - volumeMounts: - - mountPath: /run/spire/config - name: k8s-workload-registrar-config - readOnly: true - # - mountPath: /tmp/spire-agent/public # is it necessary?? i think not - # name: spire-agent-socket - # readOnly: true - - name: spire-registration-socket - mountPath: /tmp/spire-server/private - readOnly: true - volumes: - - name: spire-config - configMap: - name: spire-server - - name: spire-secrets - secret: - secretName: spire-server - # - name: spire-agent-socket - # hostPath: - # path: /run/spire/agent-sockets - # type: DirectoryOrCreate - - name: k8s-workload-registrar-config - configMap: - name: k8s-workload-registrar - - name: spire-registration-socket - hostPath: - path: /run/spire/server-sockets - type: DirectoryOrCreate - volumeClaimTemplates: - - metadata: - name: spire-data - namespace: spire - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/spire-agent.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/spire-agent.yaml index 532e640..8f75932 100644 --- a/k8s/k8s-workload-registrar/mode-crd/k8s/spire-agent.yaml +++ b/k8s/k8s-workload-registrar/mode-crd/k8s/spire-agent.yaml @@ -1,3 +1,4 @@ +# ServiceAccount for the SPIRE agent apiVersion: v1 kind: ServiceAccount metadata: @@ -13,7 +14,7 @@ metadata: name: spire-agent-cluster-role rules: - apiGroups: [""] - resources: ["pods","nodes", "nodes/proxy"] + resources: ["pods","nodes","nodes/proxy"] verbs: ["get"] --- @@ -46,9 +47,9 @@ data: log_level = "DEBUG" server_address = "spire-server" server_port = "8081" - socket_path = "/tmp/spire-agent/public/api.sock" - trust_bundle_path = "/run/spire/config/bootstrap.crt" + trust_bundle_path = "/run/spire/bundle/bundle.crt" trust_domain = "example.org" + socket_path = "/tmp/spire-agent/public/api.sock" } plugins { @@ -65,31 +66,21 @@ data: WorkloadAttestor "k8s" { plugin_data { - # Defaults to the secure kubelet port by default. - # Minikube does not have a cert in the cluster CA bundle that - # can authenticate the kubelet cert, so skip validation. - skip_kubelet_verification = true + # Defaults to the secure kubelet port by default. + # Minikube does not have a cert in the cluster CA bundle that + # can authenticate the kubelet cert, so skip validation. 
+ skip_kubelet_verification = true } } + } - WorkloadAttestor "unix" { - plugin_data { - } - } + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8080" + live_path = "/live" + ready_path = "/ready" } - bootstrap.crt: | - -----BEGIN CERTIFICATE----- - MIIBzDCCAVOgAwIBAgIJAJM4DhRH0vmuMAoGCCqGSM49BAMEMB4xCzAJBgNVBAYT - AlVTMQ8wDQYDVQQKDAZTUElGRkUwHhcNMTgwNTEzMTkzMzQ3WhcNMjMwNTEyMTkz - MzQ3WjAeMQswCQYDVQQGEwJVUzEPMA0GA1UECgwGU1BJRkZFMHYwEAYHKoZIzj0C - AQYFK4EEACIDYgAEWjB+nSGSxIYiznb84xu5WGDZj80nL7W1c3zf48Why0ma7Y7m - CBKzfQkrgDguI4j0Z+0/tDH/r8gtOtLLrIpuMwWHoe4vbVBFte1vj6Xt6WeE8lXw - cCvLs/mcmvPqVK9jo10wWzAdBgNVHQ4EFgQUh6XzV6LwNazA+GTEVOdu07o5yOgw - DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwGQYDVR0RBBIwEIYOc3Bp - ZmZlOi8vbG9jYWwwCgYIKoZIzj0EAwQDZwAwZAIwE4Me13qMC9i6Fkx0h26y09QZ - IbuRqA9puLg9AeeAAyo5tBzRl1YL0KNEp02VKSYJAjBdeJvqjJ9wW55OGj1JQwDF - D7kWeEB6oMlwPbI/5hEY3azJi16I0uN1JSYTSWGSqWc= - -----END CERTIFICATE----- --- @@ -104,14 +95,18 @@ spec: selector: matchLabels: app: spire-agent + updateStrategy: + type: RollingUpdate template: metadata: namespace: spire labels: app: spire-agent + spire-workload: agent annotations: spiffe.io/spiffe-id: "testing/agent" spec: + # hostPID is required for K8S Workload Attestation. hostPID: true hostNetwork: true dnsPolicy: ClusterFirstWithHostNet @@ -132,35 +127,41 @@ spec: - name: spire-config mountPath: /run/spire/config readOnly: true + - name: spire-bundle + mountPath: /run/spire/bundle + readOnly: true - name: spire-agent-socket mountPath: /tmp/spire-agent/public readOnly: false - name: spire-token mountPath: /var/run/secrets/tokens livenessProbe: - exec: - command: ["/opt/spire/bin/spire-agent", "healthcheck", "-socketPath", "/tmp/spire-agent/public/api.sock"] - failureThreshold: 2 - initialDelaySeconds: 15 - periodSeconds: 60 - timeoutSeconds: 3 + httpGet: + path: /live + port: 8080 + initialDelaySeconds: 10 + periodSeconds: 10 readinessProbe: - exec: - command: ["/opt/spire/bin/spire-agent", "healthcheck", "-socketPath", "/tmp/spire-agent/public/api.sock", "--shallow"] - initialDelaySeconds: 5 - periodSeconds: 5 + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 10 + periodSeconds: 10 volumes: - name: spire-config configMap: name: spire-agent + - name: spire-bundle + configMap: + name: spire-bundle - name: spire-agent-socket hostPath: - path: /tmp/spire-agent/public + path: /run/spire/agent-sockets type: DirectoryOrCreate - name: spire-token projected: - sources: - - serviceAccountToken: - path: spire-agent - expirationSeconds: 7200 - audience: spire-server + sources: + - serviceAccountToken: + path: spire-agent + expirationSeconds: 7200 + audience: spire-server diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/spire-server.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/spire-server.yaml index 66ac849..e53aef6 100644 --- a/k8s/k8s-workload-registrar/mode-crd/k8s/spire-server.yaml +++ b/k8s/k8s-workload-registrar/mode-crd/k8s/spire-server.yaml @@ -1,3 +1,4 @@ +# ServiceAccount used by the SPIRE server. 
apiVersion: v1 kind: ServiceAccount metadata: @@ -6,48 +7,93 @@ metadata: --- -# Required cluster role to allow spire-server to query k8s API server -kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole metadata: - name: spire-server-cluster-role + name: k8s-workload-registrar-role rules: -- apiGroups: [""] - resources: ["pods", "nodes"] - verbs: ["get"] + # allow TokenReview requests (to verify service account tokens for PSAT + # attestation) - apiGroups: ["authentication.k8s.io"] resources: ["tokenreviews"] - verbs: ["create"] + verbs: ["get", "create"] +- apiGroups: [""] + resources: ["endpoints", "nodes", "pods"] + verbs: ["get", "list", "watch"] +- apiGroups: ["spiffeid.spiffe.io"] + resources: ["spiffeids"] + verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] +- apiGroups: ["spiffeid.spiffe.io"] + resources: ["spiffeids/status"] + verbs: ["get", "patch", "update"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["get", "list", "update", "watch"] --- -# Binds above cluster role to spire-server service account +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + name: k8s-workload-registrar-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: k8s-workload-registrar-role +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire + +--- + +# Role for the SPIRE server +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: spire + name: spire-server-role +rules: + # allow "get" access to pods (to resolve selectors for PSAT attestation) +- apiGroups: [""] + resources: ["pods"] + verbs: ["get"] + # allow access to "get" and "patch" the spire-bundle ConfigMap (for SPIRE + # agent bootstrapping, see the spire-bundle ConfigMap below) +- apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["spire-bundle"] + verbs: ["get", "patch"] + +--- + +# RoleBinding granting the spire-server-role to the SPIRE server +# service account. +kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: spire-server-cluster-role-binding + name: spire-server-role-binding + namespace: spire subjects: - kind: ServiceAccount name: spire-server namespace: spire roleRef: - kind: ClusterRole - name: spire-server-cluster-role + kind: Role + name: spire-server-role apiGroup: rbac.authorization.k8s.io --- apiVersion: v1 -kind: Secret +kind: ConfigMap metadata: - name: spire-server + name: spire-bundle namespace: spire -type: Opaque -data: - bootstrap.key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1JR2tBZ0VCQkRBZzJMYnVsWHpRWDFORisyRGkwUkt6TVdmRUdpb0JoaC9mRnB4N3lPRXFrYS8vVHBhZVUzTzUKUUpSWlhkV0hLdWFnQndZRks0RUVBQ0toWkFOaUFBUmFNSDZkSVpMRWhpTE9kdnpqRzdsWVlObVB6U2N2dGJWegpmTi9qeGFITFNacnRqdVlJRXJOOUNTdUFPQzRqaVBSbjdUKzBNZit2eUMwNjBzdXNpbTR6QlllaDdpOXRVRVcxCjdXK1BwZTNwWjRUeVZmQndLOHV6K1p5YTgrcFVyMk09Ci0tLS0tRU5EIEVDIFBSSVZBVEUgS0VZLS0tLS0K --- +# ConfigMap containing the SPIRE server configuration. 
apiVersion: v1 kind: ConfigMap metadata: @@ -62,11 +108,12 @@ data: data_dir = "/run/spire/data" log_level = "DEBUG" default_svid_ttl = "1h" + ca_ttl = "12h" registration_uds_path = "/tmp/spire-server/private/api.sock" - ca_subject = { - country = ["US"], - organization = ["SPIFFE"], - common_name = "", + ca_subject { + country = ["US"] + organization = ["SPIFFE"] + common_name = "" } } @@ -88,107 +135,123 @@ data: } } - NodeResolver "noop" { - plugin_data {} - } - KeyManager "disk" { plugin_data { keys_path = "/run/spire/data/keys.json" } } - UpstreamAuthority "disk" { + Notifier "k8sbundle" { plugin_data { - key_file_path = "/run/spire/secrets/bootstrap.key" - cert_file_path = "/run/spire/config/bootstrap.crt" + # This plugin updates the bundle.crt value in the spire:spire-bundle + # ConfigMap by default, so no additional configuration is necessary. } } } - bootstrap.crt: | - -----BEGIN CERTIFICATE----- - MIIBzDCCAVOgAwIBAgIJAJM4DhRH0vmuMAoGCCqGSM49BAMEMB4xCzAJBgNVBAYT - AlVTMQ8wDQYDVQQKDAZTUElGRkUwHhcNMTgwNTEzMTkzMzQ3WhcNMjMwNTEyMTkz - MzQ3WjAeMQswCQYDVQQGEwJVUzEPMA0GA1UECgwGU1BJRkZFMHYwEAYHKoZIzj0C - AQYFK4EEACIDYgAEWjB+nSGSxIYiznb84xu5WGDZj80nL7W1c3zf48Why0ma7Y7m - CBKzfQkrgDguI4j0Z+0/tDH/r8gtOtLLrIpuMwWHoe4vbVBFte1vj6Xt6WeE8lXw - cCvLs/mcmvPqVK9jo10wWzAdBgNVHQ4EFgQUh6XzV6LwNazA+GTEVOdu07o5yOgw - DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwGQYDVR0RBBIwEIYOc3Bp - ZmZlOi8vbG9jYWwwCgYIKoZIzj0EAwQDZwAwZAIwE4Me13qMC9i6Fkx0h26y09QZ - IbuRqA9puLg9AeeAAyo5tBzRl1YL0KNEp02VKSYJAjBdeJvqjJ9wW55OGj1JQwDF - D7kWeEB6oMlwPbI/5hEY3azJi16I0uN1JSYTSWGSqWc= - -----END CERTIFICATE----- + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8080" + live_path = "/live" + ready_path = "/ready" + } + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8s-workload-registrar + namespace: spire +data: + k8s-workload-registrar.conf: | + trust_domain = "example.org" + server_socket_path = "/tmp/spire-server/private/api.sock" + cluster = "example-cluster" + mode = "crd" + pod_annotation = "spiffe.io/spiffe-id" + metrics_bind_addr = "0" --- -# apiVersion: apps/v1 -# kind: StatefulSet -# metadata: -# name: spire-server -# namespace: spire -# labels: -# app: spire-server -# spec: -# replicas: 1 -# selector: -# matchLabels: -# app: spire-server -# serviceName: spire-server -# template: -# metadata: -# namespace: spire -# labels: -# app: spire-server -# spec: -# serviceAccountName: spire-server -# containers: -# - name: spire-server -# image: gcr.io/spiffe-io/spire-server:0.11.0 -# args: ["-config", "/run/spire/config/server.conf"] -# ports: -# - containerPort: 8081 -# volumeMounts: -# - name: spire-config -# mountPath: /run/spire/config -# readOnly: true -# - name: spire-secrets -# mountPath: /run/spire/secrets -# readOnly: true -# - name: spire-data -# mountPath: /run/spire/data -# readOnly: false -# livenessProbe: -# exec: -# command: ["/opt/spire/bin/spire-server", "healthcheck"] -# failureThreshold: 2 -# initialDelaySeconds: 15 -# periodSeconds: 60 -# timeoutSeconds: 3 -# readinessProbe: -# exec: -# command: ["/opt/spire/bin/spire-server", "healthcheck", "--shallow"] -# initialDelaySeconds: 5 -# periodSeconds: 5 -# volumes: -# - name: spire-config -# configMap: -# name: spire-server -# - name: spire-secrets -# secret: -# secretName: spire-server -# volumeClaimTemplates: -# - metadata: -# name: spire-data -# namespace: spire -# spec: -# accessModes: -# - ReadWriteOnce -# resources: -# requests: -# storage: 1Gi - -# --- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + 
name: spire-server + namespace: spire + labels: + app: spire-server +spec: + replicas: 1 + selector: + matchLabels: + app: spire-server + serviceName: spire-server + template: + metadata: + namespace: spire + labels: + app: spire-server + spec: + serviceAccountName: spire-server + shareProcessNamespace: true + containers: + - name: spire-server + image: gcr.io/spiffe-io/spire-server:0.12.0 + args: + - -config + - /run/spire/config/server.conf + livenessProbe: + httpGet: + path: /live + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + ports: + - containerPort: 8081 + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + - name: spire-registration-socket + mountPath: /tmp/spire-server/private + readOnly: false + - name: k8s-workload-registrar + image: gcr.io/spiffe-io/k8s-workload-registrar:0.12.0 + args: ["-config", "/run/spire/k8s-workload-registrar/conf/k8s-workload-registrar.conf"] + ports: + - containerPort: 9443 + name: webhook + protocol: TCP + volumeMounts: + - name: spire-registration-socket + mountPath: /tmp/spire-server/private + readOnly: true + - name: k8s-workload-registrar + mountPath: /run/spire/k8s-workload-registrar/conf + readOnly: true + volumes: + - name: k8s-workload-registrar + configMap: + name: k8s-workload-registrar + - name: spire-registration-socket + hostPath: + path: /run/spire/server-sockets + type: DirectoryOrCreate + - name: spire-config + configMap: + name: spire-server + +--- +# Service definition for SPIRE server defining the gRPC port. apiVersion: v1 kind: Service metadata: diff --git a/k8s/k8s-workload-registrar/mode-crd/scripts/deploy-scenario.sh b/k8s/k8s-workload-registrar/mode-crd/scripts/deploy-scenario.sh old mode 100644 new mode 100755 index 2517f8c..bb09c72 --- a/k8s/k8s-workload-registrar/mode-crd/scripts/deploy-scenario.sh +++ b/k8s/k8s-workload-registrar/mode-crd/scripts/deploy-scenario.sh @@ -1,14 +1,14 @@ -kubectl apply -f k8s/namespace.yaml -kubectl apply -f k8s/spiffeid.spiffe.io_spiffeids.yaml -kubectl apply -f k8s/k8s-workload-registrar-cluster-role.yaml -kubectl apply -f k8s/spire-server.yaml -kubectl apply -f k8s/k8s-workload-registrar-configmap.yaml -kubectl apply -f k8s/k8s-workload-registrar-statefulset.yaml +#!/bin/bash -kubectl rollout status statefulset/spire-server -n spire +PARENT_DIR="$(dirname "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )")" -kubectl apply -f k8s/spire-agent.yaml +kind create cluster --name example-cluster +kubectl apply -f "${PARENT_DIR}"/k8s/namespace.yaml +kubectl apply -f "${PARENT_DIR}"/k8s/spiffeid.spiffe.io_spiffeids.yaml +kubectl apply -f "${PARENT_DIR}"/k8s/spire-server.yaml +kubectl rollout status statefulset/spire-server -n spire +kubectl apply -f "${PARENT_DIR}"/k8s/spire-agent.yaml kubectl rollout status daemonset/spire-agent -n spire -kubectl apply -f k8s/workload.yaml # doesnt work +kubectl apply -f "${PARENT_DIR}"/k8s/workload.yaml diff --git a/k8s/k8s-workload-registrar/mode-crd/scripts/test.sh b/k8s/k8s-workload-registrar/mode-crd/scripts/test.sh new file mode 100755 index 0000000..301555b --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-crd/scripts/test.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +bold=$(tput bold) || true +norm=$(tput sgr0) || true +red=$(tput setaf 1) || true +green=$(tput setaf 2) || true + +set_env() { + echo "${bold}Setting up CRD mode environment...${norm}" + 
"${DIR}"/deploy-scenario.sh +} + +cleanup() { + echo "${bold}Cleaning up...${norm}" + kind delete cluster --name example-cluster +} + +trap cleanup EXIT + +cleanup +set_env + +NODE_SPIFFE_ID="spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane" +AGENT_SPIFFE_ID="spiffe://example.org/testing/agent" +WORKLOAD_SPIFFE_ID="spiffe://example.org/testing/example-workload" + +MAX_FETCH_CHECKS=60 +FETCH_CHECK_INTERVAL=5 + +for ((i=0;i<"$MAX_FETCH_CHECKS";i++)); do + if [[ -n $(kubectl exec -t statefulset/spire-server -n spire -c spire-server -- \ + /opt/spire/bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock \ + | grep "$NODE_SPIFFE_ID") ]] && + [[ -n $(kubectl exec -t daemonset/spire-agent -n spire -- \ + /opt/spire/bin/spire-agent api fetch -socketPath /tmp/spire-agent/public/api.sock \ + | grep "$AGENT_SPIFFE_ID") ]] && + [[ -n $(kubectl exec -t deployment/example-workload -n spire -- \ + /opt/spire/bin/spire-agent api fetch -socketPath /tmp/spire-agent/public/api.sock \ + | grep "$WORKLOAD_SPIFFE_ID") ]]; then + DONE=1 + break + fi + sleep "$FETCH_CHECK_INTERVAL" +done + +if [ "${DONE}" -eq 1 ]; then + echo "${green}CRD mode test succeeded.${norm}" +else + echo "${red}CRD mode test failed.${norm}" + exit 1 +fi + + exit 0 \ No newline at end of file diff --git a/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-server.yaml b/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-server.yaml index 39adaa3..c7f3e36 100644 --- a/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-server.yaml +++ b/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-server.yaml @@ -180,6 +180,7 @@ data: mode = "reconcile" pod_label = "spire-workload" metrics_addr = "0" + controller_name = "k8s-workload-registrar" --- @@ -187,7 +188,7 @@ data: # initialize and uses the SPIRE healthcheck command for liveness/readiness # probes. 
apiVersion: apps/v1 -kind: Deployment +kind: StatefulSet metadata: name: spire-server namespace: spire @@ -198,6 +199,7 @@ spec: selector: matchLabels: app: spire-server + serviceName: spire-server template: metadata: namespace: spire @@ -275,18 +277,3 @@ spec: protocol: TCP selector: app: spire-server - ---- - -# Service definition for the admission webhook -apiVersion: v1 -kind: Service -metadata: - name: k8s-workload-registrar - namespace: spire -spec: - selector: - app: spire-server - ports: - - port: 443 - targetPort: registrar-port diff --git a/k8s/k8s-workload-registrar/mode-reconcile/scripts/deploy-scenario.sh b/k8s/k8s-workload-registrar/mode-reconcile/scripts/deploy-scenario.sh index 998e16b..6a13bef 100755 --- a/k8s/k8s-workload-registrar/mode-reconcile/scripts/deploy-scenario.sh +++ b/k8s/k8s-workload-registrar/mode-reconcile/scripts/deploy-scenario.sh @@ -1,9 +1,14 @@ -kubectl apply -f k8s/namespace.yaml -kubectl apply -f k8s/spire-server.yaml -kubectl rollout status deployment/spire-server -n spire +#!/bin/bash -kubectl apply -f k8s/spire-agent.yaml +PARENT_DIR="$(dirname "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )")" + +kind create cluster --name example-cluster +kubectl apply -f "${PARENT_DIR}"/k8s/namespace.yaml +kubectl apply -f "${PARENT_DIR}"/k8s/spire-server.yaml +kubectl rollout status statefulset/spire-server -n spire + +kubectl apply -f "${PARENT_DIR}"/k8s/spire-agent.yaml kubectl rollout status daemonset/spire-agent -n spire -kubectl apply -f k8s/workload.yaml +kubectl apply -f "${PARENT_DIR}"/k8s/workload.yaml kubectl rollout status deployment/example-workload -n spire diff --git a/k8s/k8s-workload-registrar/mode-reconcile/scripts/test.sh b/k8s/k8s-workload-registrar/mode-reconcile/scripts/test.sh new file mode 100755 index 0000000..5adadd9 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-reconcile/scripts/test.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +bold=$(tput bold) || true +norm=$(tput sgr0) || true +red=$(tput setaf 1) || true +green=$(tput setaf 2) || true + +set_env() { + echo "${bold}Setting up reconcile environment...${norm}" + "${DIR}"/deploy-scenario.sh +} + +cleanup() { + echo "${bold}Cleaning up...${norm}" + kind delete cluster --name example-cluster +} + +trap cleanup EXIT + +cleanup +set_env + +NODE_SPIFFE_ID="spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane" +AGENT_SPIFFE_ID="spiffe://example.org/agent" +WORKLOAD_SPIFFE_ID="spiffe://example.org/example-workload" + +MAX_FETCH_CHECKS=60 +FETCH_CHECK_INTERVAL=5 + +for ((i=0;i<"$MAX_FETCH_CHECKS";i++)); do + if [[ -n $(kubectl exec -t statefulset/spire-server -n spire -c spire-server -- \ + /opt/spire/bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock \ + | grep "$NODE_SPIFFE_ID") ]] && + [[ -n $(kubectl exec -t daemonset/spire-agent -n spire -- \ + /opt/spire/bin/spire-agent api fetch -socketPath /tmp/spire-agent/public/api.sock \ + | grep "$AGENT_SPIFFE_ID") ]] && + [[ -n $(kubectl exec -t deployment/example-workload -n spire -- \ + /opt/spire/bin/spire-agent api fetch -socketPath /tmp/spire-agent/public/api.sock \ + | grep "$WORKLOAD_SPIFFE_ID") ]]; then + DONE=1 + break + fi + sleep "$FETCH_CHECK_INTERVAL" +done + +if [ "${DONE}" -eq 1 ]; then + echo "${green}Reconcile mode test succeeded.${norm}" +else + echo "${red}Reconcile mode test failed.${norm}" + exit 1 +fi + +exit 0 diff --git a/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-agent.yaml 
b/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-agent.yaml index c283e33..fb10424 100644 --- a/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-agent.yaml +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-agent.yaml @@ -33,7 +33,6 @@ roleRef: name: spire-agent-cluster-role apiGroup: rbac.authorization.k8s.io - --- # ConfigMap for the SPIRE agent featuring: diff --git a/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-server.yaml b/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-server.yaml index 8dca6e5..f889385 100644 --- a/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-server.yaml +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-server.yaml @@ -217,7 +217,7 @@ data: # initialize and uses the SPIRE healthcheck command for liveness/readiness # probes. apiVersion: apps/v1 -kind: Deployment +kind: StatefulSet metadata: name: spire-server namespace: spire @@ -228,6 +228,7 @@ spec: selector: matchLabels: app: spire-server + serviceName: spire-server template: metadata: namespace: spire diff --git a/k8s/k8s-workload-registrar/kind-config.yaml b/k8s/k8s-workload-registrar/mode-webhook/kind-config.yaml similarity index 93% rename from k8s/k8s-workload-registrar/kind-config.yaml rename to k8s/k8s-workload-registrar/mode-webhook/kind-config.yaml index d91d6ae..799c5f8 100644 --- a/k8s/k8s-workload-registrar/kind-config.yaml +++ b/k8s/k8s-workload-registrar/mode-webhook/kind-config.yaml @@ -16,4 +16,4 @@ nodes: - role: control-plane extraMounts: - containerPath: /etc/kubernetes/pki/admctrl - hostPath: K8SDIR/admctrl + hostPath: WEBHOOKDIR/k8s/admctrl diff --git a/k8s/k8s-workload-registrar/mode-webhook/scripts/create-cluster.sh b/k8s/k8s-workload-registrar/mode-webhook/scripts/create-cluster.sh new file mode 100755 index 0000000..94c0c67 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-webhook/scripts/create-cluster.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +PARENT_DIR="$(dirname "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )")" +sed -i.bak "s#WEBHOOKDIR#${PARENT_DIR}#g" "${PARENT_DIR}"/kind-config.yaml + +rm "${PARENT_DIR}"/kind-config.yaml.bak +kind create cluster --name example-cluster --config "${PARENT_DIR}"/kind-config.yaml diff --git a/k8s/k8s-workload-registrar/mode-webhook/scripts/deploy-scenario.sh b/k8s/k8s-workload-registrar/mode-webhook/scripts/deploy-scenario.sh index fc49187..1c8311c 100755 --- a/k8s/k8s-workload-registrar/mode-webhook/scripts/deploy-scenario.sh +++ b/k8s/k8s-workload-registrar/mode-webhook/scripts/deploy-scenario.sh @@ -1,11 +1,16 @@ -kubectl apply -f k8s/namespace.yaml -kubectl apply -f k8s/k8s-workload-registrar-secret.yaml -kubectl apply -f k8s/spire-server.yaml -kubectl rollout status deployment/spire-server -n spire +#!/bin/bash +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +PARENT_DIR="$(dirname "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )")" -kubectl apply -f k8s/validation-webhook.yaml -kubectl apply -f k8s/spire-agent.yaml +bash "${DIR}"/create-cluster.sh +kubectl apply -f "${PARENT_DIR}"/k8s/namespace.yaml +kubectl apply -f "${PARENT_DIR}"/k8s/k8s-workload-registrar-secret.yaml +kubectl apply -f "${PARENT_DIR}"/k8s/spire-server.yaml +kubectl rollout status statefulset/spire-server -n spire + +kubectl apply -f "${PARENT_DIR}"/k8s/validation-webhook.yaml +kubectl apply -f "${PARENT_DIR}"/k8s/spire-agent.yaml kubectl rollout status daemonset/spire-agent -n spire -kubectl apply -f k8s/workload.yaml +kubectl apply -f "${PARENT_DIR}"/k8s/workload.yaml kubectl rollout status deployment/example-workload -n spire diff --git 
a/k8s/k8s-workload-registrar/mode-webhook/scripts/test.sh b/k8s/k8s-workload-registrar/mode-webhook/scripts/test.sh new file mode 100755 index 0000000..2b7bb44 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-webhook/scripts/test.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +bold=$(tput bold) || true +norm=$(tput sgr0) || true +red=$(tput setaf 1) || true +green=$(tput setaf 2) || true + +set_env() { + echo "${bold}Setting up webhook environment...${norm}" + "${DIR}"/deploy-scenario.sh +} + +cleanup() { + echo "${bold}Cleaning up...${norm}" + kind delete cluster --name example-cluster +} + +trap cleanup EXIT + +cleanup +set_env + +NODE_SPIFFE_ID="spiffe://example.org/k8s-workload-registrar/example-cluster/node" +AGENT_SPIFFE_ID="spiffe://example.org/ns/spire/sa/spire-agent" +WORKLOAD_SPIFFE_ID="spiffe://example.org/ns/spire/sa/default" + +MAX_FETCH_CHECKS=60 +FETCH_CHECK_INTERVAL=5 + +for ((i=0;i<"$MAX_FETCH_CHECKS";i++)); do + if [[ -n $(kubectl exec -t statefulset/spire-server -n spire -c spire-server -- \ + /opt/spire/bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock \ + | grep "$NODE_SPIFFE_ID") ]] && + [[ -n $(kubectl exec -t daemonset/spire-agent -n spire -- \ + /opt/spire/bin/spire-agent api fetch -socketPath /tmp/spire-agent/public/api.sock \ + | grep "$AGENT_SPIFFE_ID") ]] && + [[ -n $(kubectl exec -t deployment/example-workload -n spire -- \ + /opt/spire/bin/spire-agent api fetch -socketPath /tmp/spire-agent/public/api.sock \ + | grep "$WORKLOAD_SPIFFE_ID") ]]; then + DONE=1 + break + fi + sleep "$FETCH_CHECK_INTERVAL" +done + +if [ "${DONE}" -eq 1 ]; then + echo "${green}Webhook mode test succeeded.${norm}" +else + echo "${red}Webhook mode test failed.${norm}" + exit 1 +fi + +exit 0 diff --git a/k8s/k8s-workload-registrar/test.sh b/k8s/k8s-workload-registrar/test.sh new file mode 100755 index 0000000..9390b48 --- /dev/null +++ b/k8s/k8s-workload-registrar/test.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +bold=$(tput bold) || true +norm=$(tput sgr0) || true +red=$(tput setaf 1) || true +green=$(tput setaf 2) || true + +fail() { + echo "${red}$*${norm}." + exit 1 +} + +for testdir in "${DIR}"/*; do + if [[ -x "${testdir}/scripts/test.sh" ]]; then + testname=$(basename "$testdir") + echo "${bold}Running \"$testname\" test...${norm}" + if ${testdir}/scripts/test.sh; then + echo "${green}\"$testname\" test succeeded${norm}" + else + echo "${red}\"$testname\" test failed${norm}" + FAILED=true + fi + fi +done + +if [ -n "${FAILED}" ]; then + fail "There were test failures" +fi +echo "${green}Done. 
Kubernetes workload registrar tests passed!${norm}" \ No newline at end of file From efcb6c2fa3ad5f93551d6394c2158f2167396f08 Mon Sep 17 00:00:00 2001 From: Luciano Date: Thu, 10 Jun 2021 21:27:49 -0300 Subject: [PATCH 4/5] switch to minikube Signed-off-by: Luciano --- .travis.yml | 2 -- .../mode-crd/scripts/delete-scenario.sh | 22 +++++++++++++++++++ .../mode-crd/scripts/deploy-scenario.sh | 1 - .../mode-crd/{scripts => }/test.sh | 14 +++++++----- .../mode-reconcile/scripts/delete-scenario.sh | 10 +++++++++ .../mode-reconcile/scripts/deploy-scenario.sh | 1 - .../mode-reconcile/{scripts => }/test.sh | 12 +++++----- .../k8s/admctrl/admission-control.yaml | 2 +- .../mode-webhook/kind-config.yaml | 19 ---------------- .../mode-webhook/scripts/create-cluster.sh | 7 ------ .../mode-webhook/scripts/delete-scenario.sh | 11 ++++++++++ .../mode-webhook/scripts/deploy-scenario.sh | 17 +++++++++++++- .../mode-webhook/{scripts => }/test.sh | 10 ++++----- k8s/k8s-workload-registrar/test.sh | 5 ++--- 14 files changed, 79 insertions(+), 54 deletions(-) create mode 100755 k8s/k8s-workload-registrar/mode-crd/scripts/delete-scenario.sh rename k8s/k8s-workload-registrar/mode-crd/{scripts => }/test.sh (84%) create mode 100755 k8s/k8s-workload-registrar/mode-reconcile/scripts/delete-scenario.sh rename k8s/k8s-workload-registrar/mode-reconcile/{scripts => }/test.sh (84%) delete mode 100644 k8s/k8s-workload-registrar/mode-webhook/kind-config.yaml delete mode 100755 k8s/k8s-workload-registrar/mode-webhook/scripts/create-cluster.sh create mode 100755 k8s/k8s-workload-registrar/mode-webhook/scripts/delete-scenario.sh rename k8s/k8s-workload-registrar/mode-webhook/{scripts => }/test.sh (87%) diff --git a/.travis.yml b/.travis.yml index 1041cf9..f7a9815 100644 --- a/.travis.yml +++ b/.travis.yml @@ -25,8 +25,6 @@ before_script: - minikube update-context # Wait for Kubernetes to be up and ready. 
- JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'; until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do sleep 1; done - # Download kind - - curl -Lo kind https://kind.sigs.k8s.io/dl/v0.10.0/kind-linux-amd64 && chmod +x kind && sudo mv kind /usr/local/bin/ script: - ./k8s/test-all.sh diff --git a/k8s/k8s-workload-registrar/mode-crd/scripts/delete-scenario.sh b/k8s/k8s-workload-registrar/mode-crd/scripts/delete-scenario.sh new file mode 100755 index 0000000..b7514bd --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-crd/scripts/delete-scenario.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +PARENT_DIR="$(dirname "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )")" + +kubectl delete -f "${PARENT_DIR}"/k8s/workload.yaml --ignore-not-found + +kubectl delete -f "${PARENT_DIR}"/k8s/spire-agent.yaml --ignore-not-found + +kubectl delete -f "${PARENT_DIR}"/k8s/spire-server.yaml --ignore-not-found + +SPIFFE_ID_CRDS=$(kubectl get spiffeids --no-headers -o custom-columns=":metadata.name" -n spire) +for SPIFFE_ID_CRD in $SPIFFE_ID_CRDS +do + kubectl patch spiffeid.spiffeid.spiffe.io/"${SPIFFE_ID_CRD}" --type=merge -p '{"metadata":{"finalizers":[]}}' -n spire + kubectl delete spiffeid "${SPIFFE_ID_CRD}" -n spire --ignore-not-found +done + +kubectl patch customresourcedefinition.apiextensions.k8s.io/spiffeids.spiffeid.spiffe.io --type=merge -p '{"metadata":{"finalizers":[]}}' +kubectl delete -f "${PARENT_DIR}"/k8s/spiffeid.spiffe.io_spiffeids.yaml --ignore-not-found + + +kubectl delete -f "${PARENT_DIR}"/k8s/namespace.yaml --ignore-not-found diff --git a/k8s/k8s-workload-registrar/mode-crd/scripts/deploy-scenario.sh b/k8s/k8s-workload-registrar/mode-crd/scripts/deploy-scenario.sh index bb09c72..33bae24 100755 --- a/k8s/k8s-workload-registrar/mode-crd/scripts/deploy-scenario.sh +++ b/k8s/k8s-workload-registrar/mode-crd/scripts/deploy-scenario.sh @@ -2,7 +2,6 @@ PARENT_DIR="$(dirname "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )")" -kind create cluster --name example-cluster kubectl apply -f "${PARENT_DIR}"/k8s/namespace.yaml kubectl apply -f "${PARENT_DIR}"/k8s/spiffeid.spiffe.io_spiffeids.yaml kubectl apply -f "${PARENT_DIR}"/k8s/spire-server.yaml diff --git a/k8s/k8s-workload-registrar/mode-crd/scripts/test.sh b/k8s/k8s-workload-registrar/mode-crd/test.sh similarity index 84% rename from k8s/k8s-workload-registrar/mode-crd/scripts/test.sh rename to k8s/k8s-workload-registrar/mode-crd/test.sh index 301555b..6dbd3be 100755 --- a/k8s/k8s-workload-registrar/mode-crd/scripts/test.sh +++ b/k8s/k8s-workload-registrar/mode-crd/test.sh @@ -7,14 +7,16 @@ norm=$(tput sgr0) || true red=$(tput setaf 1) || true green=$(tput setaf 2) || true +echo $DIR + set_env() { echo "${bold}Setting up CRD mode environment...${norm}" - "${DIR}"/deploy-scenario.sh + "${DIR}"/scripts/deploy-scenario.sh > /dev/null } cleanup() { echo "${bold}Cleaning up...${norm}" - kind delete cluster --name example-cluster + "${DIR}"/scripts/delete-scenario.sh > /dev/null } trap cleanup EXIT @@ -22,7 +24,7 @@ trap cleanup EXIT cleanup set_env -NODE_SPIFFE_ID="spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane" +NODE_SPIFFE_ID="spiffe://example.org/k8s-workload-registrar/example-cluster/node/" AGENT_SPIFFE_ID="spiffe://example.org/testing/agent" WORKLOAD_SPIFFE_ID="spiffe://example.org/testing/example-workload" @@ -33,7 +35,7 @@ for ((i=0;i<"$MAX_FETCH_CHECKS";i++)); do if [[ -n $(kubectl exec -t statefulset/spire-server -n 
spire -c spire-server -- \ /opt/spire/bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock \ | grep "$NODE_SPIFFE_ID") ]] && - [[ -n $(kubectl exec -t daemonset/spire-agent -n spire -- \ + [[ -n $(kubectl exec -t daemonset/spire-agent -n spire -c spire-agent -- \ /opt/spire/bin/spire-agent api fetch -socketPath /tmp/spire-agent/public/api.sock \ | grep "$AGENT_SPIFFE_ID") ]] && [[ -n $(kubectl exec -t deployment/example-workload -n spire -- \ @@ -46,10 +48,10 @@ for ((i=0;i<"$MAX_FETCH_CHECKS";i++)); do done if [ "${DONE}" -eq 1 ]; then - echo "${green}CRD mode test succeeded.${norm}" + exit 0 else echo "${red}CRD mode test failed.${norm}" exit 1 fi - exit 0 \ No newline at end of file + exit 0 diff --git a/k8s/k8s-workload-registrar/mode-reconcile/scripts/delete-scenario.sh b/k8s/k8s-workload-registrar/mode-reconcile/scripts/delete-scenario.sh new file mode 100755 index 0000000..7e1e089 --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-reconcile/scripts/delete-scenario.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +PARENT_DIR="$(dirname "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )")" + +kubectl delete -f "${PARENT_DIR}"/k8s/workload.yaml --ignore-not-found + +kubectl delete -f "${PARENT_DIR}"/k8s/spire-agent.yaml --ignore-not-found + +kubectl delete -f "${PARENT_DIR}"/k8s/spire-server.yaml --ignore-not-found +kubectl delete -f "${PARENT_DIR}"/k8s/namespace.yaml --ignore-not-found diff --git a/k8s/k8s-workload-registrar/mode-reconcile/scripts/deploy-scenario.sh b/k8s/k8s-workload-registrar/mode-reconcile/scripts/deploy-scenario.sh index 6a13bef..bbabc88 100755 --- a/k8s/k8s-workload-registrar/mode-reconcile/scripts/deploy-scenario.sh +++ b/k8s/k8s-workload-registrar/mode-reconcile/scripts/deploy-scenario.sh @@ -2,7 +2,6 @@ PARENT_DIR="$(dirname "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )")" -kind create cluster --name example-cluster kubectl apply -f "${PARENT_DIR}"/k8s/namespace.yaml kubectl apply -f "${PARENT_DIR}"/k8s/spire-server.yaml kubectl rollout status statefulset/spire-server -n spire diff --git a/k8s/k8s-workload-registrar/mode-reconcile/scripts/test.sh b/k8s/k8s-workload-registrar/mode-reconcile/test.sh similarity index 84% rename from k8s/k8s-workload-registrar/mode-reconcile/scripts/test.sh rename to k8s/k8s-workload-registrar/mode-reconcile/test.sh index 5adadd9..4d37950 100755 --- a/k8s/k8s-workload-registrar/mode-reconcile/scripts/test.sh +++ b/k8s/k8s-workload-registrar/mode-reconcile/test.sh @@ -9,12 +9,12 @@ green=$(tput setaf 2) || true set_env() { echo "${bold}Setting up reconcile environment...${norm}" - "${DIR}"/deploy-scenario.sh + "${DIR}"/scripts/deploy-scenario.sh > /dev/null } cleanup() { echo "${bold}Cleaning up...${norm}" - kind delete cluster --name example-cluster + "${DIR}"/scripts/delete-scenario.sh > /dev/null } trap cleanup EXIT @@ -22,7 +22,7 @@ trap cleanup EXIT cleanup set_env -NODE_SPIFFE_ID="spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane" +NODE_SPIFFE_ID="spiffe://example.org/k8s-workload-registrar/example-cluster/node/" AGENT_SPIFFE_ID="spiffe://example.org/agent" WORKLOAD_SPIFFE_ID="spiffe://example.org/example-workload" @@ -33,7 +33,7 @@ for ((i=0;i<"$MAX_FETCH_CHECKS";i++)); do if [[ -n $(kubectl exec -t statefulset/spire-server -n spire -c spire-server -- \ /opt/spire/bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock \ | grep "$NODE_SPIFFE_ID") ]] && - [[ -n $(kubectl exec -t daemonset/spire-agent -n spire -- \ + [[ -n $(kubectl 
exec -t daemonset/spire-agent -n spire -c spire-agent -- \ /opt/spire/bin/spire-agent api fetch -socketPath /tmp/spire-agent/public/api.sock \ | grep "$AGENT_SPIFFE_ID") ]] && [[ -n $(kubectl exec -t deployment/example-workload -n spire -- \ @@ -46,10 +46,8 @@ for ((i=0;i<"$MAX_FETCH_CHECKS";i++)); do done if [ "${DONE}" -eq 1 ]; then - echo "${green}Reconcile mode test succeeded.${norm}" + exit 0 else echo "${red}Reconcile mode test failed.${norm}" exit 1 fi - -exit 0 diff --git a/k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/admission-control.yaml b/k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/admission-control.yaml index 05480c2..7bb684b 100644 --- a/k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/admission-control.yaml +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/admission-control.yaml @@ -5,4 +5,4 @@ plugins: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kind: WebhookAdmission - kubeConfigFile: /etc/kubernetes/pki/admctrl/kubeconfig.yaml + kubeConfigFile: /var/lib/minikube/certs/admctrl/kubeconfig.yaml diff --git a/k8s/k8s-workload-registrar/mode-webhook/kind-config.yaml b/k8s/k8s-workload-registrar/mode-webhook/kind-config.yaml deleted file mode 100644 index 799c5f8..0000000 --- a/k8s/k8s-workload-registrar/mode-webhook/kind-config.yaml +++ /dev/null @@ -1,19 +0,0 @@ -kind: Cluster -apiVersion: kind.x-k8s.io/v1alpha4 -kubeadmConfigPatches: -- | - apiVersion: kubeadm.k8s.io/v1beta2 - kind: ClusterConfiguration - metadata: - name: config - apiServer: - extraArgs: - "service-account-signing-key-file": "/etc/kubernetes/pki/sa.key" - "service-account-issuer": "api" - "service-account-api-audiences": "api,spire-server" - "admission-control-config-file": "/etc/kubernetes/pki/admctrl/admission-control.yaml" -nodes: -- role: control-plane - extraMounts: - - containerPath: /etc/kubernetes/pki/admctrl - hostPath: WEBHOOKDIR/k8s/admctrl diff --git a/k8s/k8s-workload-registrar/mode-webhook/scripts/create-cluster.sh b/k8s/k8s-workload-registrar/mode-webhook/scripts/create-cluster.sh deleted file mode 100755 index 94c0c67..0000000 --- a/k8s/k8s-workload-registrar/mode-webhook/scripts/create-cluster.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -PARENT_DIR="$(dirname "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )")" -sed -i.bak "s#WEBHOOKDIR#${PARENT_DIR}#g" "${PARENT_DIR}"/kind-config.yaml - -rm "${PARENT_DIR}"/kind-config.yaml.bak -kind create cluster --name example-cluster --config "${PARENT_DIR}"/kind-config.yaml diff --git a/k8s/k8s-workload-registrar/mode-webhook/scripts/delete-scenario.sh b/k8s/k8s-workload-registrar/mode-webhook/scripts/delete-scenario.sh new file mode 100755 index 0000000..c10e22e --- /dev/null +++ b/k8s/k8s-workload-registrar/mode-webhook/scripts/delete-scenario.sh @@ -0,0 +1,11 @@ +#!/bin/bash +PARENT_DIR="$(dirname "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )")" + +kubectl delete -f "${PARENT_DIR}"/k8s/workload.yaml --ignore-not-found + +kubectl delete -f "${PARENT_DIR}"/k8s/spire-agent.yaml --ignore-not-found +kubectl delete -f "${PARENT_DIR}"/k8s/validation-webhook.yaml --ignore-not-found + +kubectl delete -f "${PARENT_DIR}"/k8s/spire-server.yaml --ignore-not-found +kubectl delete -f "${PARENT_DIR}"/k8s/k8s-workload-registrar-secret.yaml --ignore-not-found +kubectl delete -f "${PARENT_DIR}"/k8s/namespace.yaml --ignore-not-found diff --git a/k8s/k8s-workload-registrar/mode-webhook/scripts/deploy-scenario.sh b/k8s/k8s-workload-registrar/mode-webhook/scripts/deploy-scenario.sh index 1c8311c..e54a646 100755 --- 
a/k8s/k8s-workload-registrar/mode-webhook/scripts/deploy-scenario.sh +++ b/k8s/k8s-workload-registrar/mode-webhook/scripts/deploy-scenario.sh @@ -2,7 +2,22 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" PARENT_DIR="$(dirname "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )")" -bash "${DIR}"/create-cluster.sh +if [ -n "${TRAVIS}" ]; then + minikube stop + sudo cp -R "${PARENT_DIR}"/k8s/admctrl /var/lib/minikube/certs/ + minikube start --driver=none --bootstrapper=kubeadm --extra-config=apiserver.admission-control-config-file=/var/lib/minikube/certs/admctrl/admission-control.yaml +else + docker cp "${PARENT_DIR}"/k8s/admctrl minikube:/var/lib/minikube/certs/ + minikube stop + minikube start \ + --extra-config=apiserver.service-account-signing-key-file=/var/lib/minikube/certs/sa.key \ + --extra-config=apiserver.service-account-key-file=/var/lib/minikube/certs/sa.pub \ + --extra-config=apiserver.service-account-issuer=api \ + --extra-config=apiserver.service-account-api-audiences=api,spire-server \ + --extra-config=apiserver.authorization-mode=Node,RBAC \ + --extra-config=apiserver.admission-control-config-file=/var/lib/minikube/certs/admctrl/admission-control.yaml +fi + kubectl apply -f "${PARENT_DIR}"/k8s/namespace.yaml kubectl apply -f "${PARENT_DIR}"/k8s/k8s-workload-registrar-secret.yaml kubectl apply -f "${PARENT_DIR}"/k8s/spire-server.yaml diff --git a/k8s/k8s-workload-registrar/mode-webhook/scripts/test.sh b/k8s/k8s-workload-registrar/mode-webhook/test.sh similarity index 87% rename from k8s/k8s-workload-registrar/mode-webhook/scripts/test.sh rename to k8s/k8s-workload-registrar/mode-webhook/test.sh index 2b7bb44..6efb4fb 100755 --- a/k8s/k8s-workload-registrar/mode-webhook/scripts/test.sh +++ b/k8s/k8s-workload-registrar/mode-webhook/test.sh @@ -9,12 +9,12 @@ green=$(tput setaf 2) || true set_env() { echo "${bold}Setting up webhook environment...${norm}" - "${DIR}"/deploy-scenario.sh + "${DIR}"/scripts/deploy-scenario.sh > /dev/null } cleanup() { echo "${bold}Cleaning up...${norm}" - kind delete cluster --name example-cluster + "${DIR}"/scripts/delete-scenario.sh > /dev/null } trap cleanup EXIT @@ -33,7 +33,7 @@ for ((i=0;i<"$MAX_FETCH_CHECKS";i++)); do if [[ -n $(kubectl exec -t statefulset/spire-server -n spire -c spire-server -- \ /opt/spire/bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock \ | grep "$NODE_SPIFFE_ID") ]] && - [[ -n $(kubectl exec -t daemonset/spire-agent -n spire -- \ + [[ -n $(kubectl exec -t daemonset/spire-agent -n spire -c spire-agent -- \ /opt/spire/bin/spire-agent api fetch -socketPath /tmp/spire-agent/public/api.sock \ | grep "$AGENT_SPIFFE_ID") ]] && [[ -n $(kubectl exec -t deployment/example-workload -n spire -- \ @@ -46,10 +46,8 @@ for ((i=0;i<"$MAX_FETCH_CHECKS";i++)); do done if [ "${DONE}" -eq 1 ]; then - echo "${green}Webhook mode test succeeded.${norm}" + exit 0 else echo "${red}Webhook mode test failed.${norm}" exit 1 fi - -exit 0 diff --git a/k8s/k8s-workload-registrar/test.sh b/k8s/k8s-workload-registrar/test.sh index 9390b48..1f052af 100755 --- a/k8s/k8s-workload-registrar/test.sh +++ b/k8s/k8s-workload-registrar/test.sh @@ -13,10 +13,10 @@ fail() { } for testdir in "${DIR}"/*; do - if [[ -x "${testdir}/scripts/test.sh" ]]; then + if [[ -x "${testdir}/test.sh" ]]; then testname=$(basename "$testdir") echo "${bold}Running \"$testname\" test...${norm}" - if ${testdir}/scripts/test.sh; then + if ${testdir}/test.sh; then echo "${green}\"$testname\" test succeeded${norm}" else echo "${red}\"$testname\" 
test failed${norm}" @@ -28,4 +28,3 @@ done if [ -n "${FAILED}" ]; then fail "There were test failures" fi -echo "${green}Done. Kubernetes workload registrar tests passed!${norm}" \ No newline at end of file From 76a63685beac1e7f48ad3b2ee40374f206bab34c Mon Sep 17 00:00:00 2001 From: Luciano Date: Mon, 2 Aug 2021 20:41:54 -0300 Subject: [PATCH 5/5] address PR comments Signed-off-by: Luciano --- k8s/k8s-workload-registrar/README.md | 268 ++++++++---------- .../mode-crd/k8s/spire-agent.yaml | 6 +- .../mode-crd/k8s/spire-server.yaml | 14 +- k8s/k8s-workload-registrar/mode-crd/test.sh | 2 +- .../mode-reconcile/k8s/spire-agent.yaml | 9 +- .../mode-reconcile/k8s/spire-server.yaml | 24 +- .../mode-reconcile/test.sh | 2 +- .../mode-webhook/k8s/admctrl/kubeconfig.yaml | 2 - .../k8s/k8s-workload-registrar-secret.yaml | 1 - .../mode-webhook/k8s/spire-agent.yaml | 9 +- .../mode-webhook/k8s/spire-server.yaml | 26 +- .../mode-webhook/k8s/validation-webhook.yaml | 2 - .../mode-webhook/test.sh | 2 +- 13 files changed, 136 insertions(+), 231 deletions(-) diff --git a/k8s/k8s-workload-registrar/README.md b/k8s/k8s-workload-registrar/README.md index b8d2b70..6b90fcf 100644 --- a/k8s/k8s-workload-registrar/README.md +++ b/k8s/k8s-workload-registrar/README.md @@ -1,13 +1,13 @@ # Configure SPIRE to use the Kubernetes Workload Registrar - This tutorial provides an example of how to configure the SPIRE Kubernetes Workload Registrar as a container within the SPIRE Server pod using a local cluster deployed with [kind](https://kind.sigs.k8s.io/). The registrar enables automatic workload registration and management in SPIRE Kubernetes implementations. It is highly encouraged to execute, or at least read through, the [Kubernetes Quickstart Tutorial](../quickstart/) to fully understand this tutorial as a similar deployment is used here. + This tutorial builds on the [Kubernetes Quickstart Tutorial](../quickstart/) to provide an example of how to configure the SPIRE Kubernetes Workload Registrar as a container within the SPIRE Server pod. The registrar enables automatic workload registration and management in SPIRE Kubernetes implementations. The changes required to deploy the registrar and the necessary files are shown as a delta to the quickstart tutorial, so it is highly encouraged to execute, or at least read through, the Kubernetes Quickstart Tutorial first. This tutorial demonstrates how to use the registrar's three different modes: * Webhook - For historical reasons, the webhook mode is the default but reconcile and CRD modes are now preferred because webhook can create StatefulSets and pods with no entries and cause other cleanup and scalability issues. - * Reconcile - The reconcile mode uses reconciling controllers rather than webhooks. It may may be slightly faster to create new entries than CRD mode and requires less configuration. + * Reconcile - The reconcile mode uses reconciling controllers rather than webhooks. It may be slightly faster to create new entries than CRD mode and requires less configuration. * CRD - The CRD mode provides a namespaced SpiffeID custom resource and is best for cases where you plan to manage SpiffeID custom resources directly. -See the [Differences between modes](https://github.com/spiffe/spire/tree/master/support/k8s/k8s-workload-registrar#differences-between-modes) section of the GitHub README for more information. 
+For more information, see the [Differences between modes](https://github.com/spiffe/spire/tree/master/support/k8s/k8s-workload-registrar#differences-between-modes) section of the registrar README.

In this document you will learn how to:
* Deploy the K8s Workload Registrar as a container within the SPIRE Server Pod
@@ -15,22 +15,21 @@ In this document you will learn how to:
* Use the three workload registration modes
* Test successful registration entries creation

-For documentation about SPIRE Kubernetes Workload Registrar configuration options, see the [GitHub README](https://github.com/spiffe/spire/tree/master/support/k8s/k8s-workload-registrar).
+See the SPIRE Kubernetes Workload Registrar [README](https://github.com/spiffe/spire/tree/master/support/k8s/k8s-workload-registrar) for complete configuration options.

# Prerequisites
Before proceeding, review the following list:
- * It is recommended to go through the [Kubernetes Quickstart Tutorial](../quickstart/) before proceeding with the steps described in this guide.
- * Required configuration files for this tutorial can be found in the `k8s/k8s-workload-registrar` directory in [https://github.com/spiffe/spire-tutorials](https://github.com/spiffe/spire-tutorials).
+ * You'll need access to the Kubernetes environment configured when going through the [Kubernetes Quickstart Tutorial](../quickstart/).
+ * Required configuration files for this tutorial can be found in the `k8s/k8s-workload-registrar` directory in [https://github.com/spiffe/spire-tutorials](https://github.com/spiffe/spire-tutorials). If you didn't already clone the repo for the _Kubernetes Quickstart Tutorial_, please do so now.
 * The steps in this document should work with Kubernetes version 1.20.2.

-We will deploy an scenario that consists of a StatefulSet containing a SPIRE Server and the Kubernetes Workload Registrar, a SPIRE Agent, and a workload, and configure the different modes to illustrate the automatic registration entries creation.
+We will deploy a scenario that consists of a StatefulSet containing a SPIRE Server and the Kubernetes Workload Registrar, a SPIRE Agent, and a workload, and configure the different modes to illustrate automatic registration entries creation.

-# Common configuration
+# Common configuration: socket setup

-Below, we describe parts of the configurations that are common to the three modes.
-
-The SPIRE Server and the Kubernetes Workload registrar will communicate each other using a socket, that will be mounted at the `/tmp/spire-server/private` directory, as we can see from the `volumeMounts` section of both containers. The only difference between these sections is that, for the registrar, the socket will have the `readOnly` option set to `true`, while for the SPIRE Server container it will have its value set to `false`. The registrar container's section that illustrate what we described earlier is shown:
+Socket configuration is necessary in all three registrar modes.
+The SPIRE Server and the Kubernetes Workload registrar will communicate with each other using a socket mounted at the `/tmp/spire-server/private` directory, as we can see from the `volumeMounts` section of both containers. The only difference between these sections is that, for the registrar, the socket will have the `readOnly` option set to `true`, while for the SPIRE Server container it will have its value set to `false`. For example, here is the registrar container's `volumeMounts` section from `spire-server.yaml`:

```
volumeMounts:
- name: spire-server-socket
@@ -38,11 +37,16 @@ volumeMounts:
  readOnly: true
```
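For comparison, the SPIRE Server container's side of the same volume can be sketched from the description above; only the `readOnly` flag differs (this snippet is inferred from the prose, not copied from the manifest):

```
# SPIRE Server container (sketch): same socket volume, but writable
- name: spire-server-socket
  mountPath: /tmp/spire-server/private
  readOnly: false
```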
-# Webhook mode (default)
+Continue with the registrar mode that you want to try out:
+* [Webhook](#configure-webhook-mode)
+* [Reconcile](#configure-reconcile-mode)
+* [CRD](#configure-crd-mode)
+
+# Configure Webhook mode

-In this section we will review the important files needed to configure Webhook mode.
+This section describes the older, default webhook mode of the Kubernetes Workload Registrar. We will review the important files needed to configure it.

-This mode makes use of the `ValidatingWebhookConfiguration` feature from Kubernetes, which is called by the Kubernetes API server everytime a new pod is created or deleted in the cluster, as we can see from the rules of the resource below:
+This mode makes use of the `ValidatingWebhookConfiguration` feature from Kubernetes, which is called by the Kubernetes API server every time a new pod is created or deleted in the cluster, as we can see from the rules of the resource below:

```
ApiVersion: admissionregistration.k8s.io/v1beta1
@@ -67,9 +71,9 @@ webhooks:
  scope: "Namespaced"
```

-This webhook itself authenticates the API server, and for this reason we provide a CA bundle, with the `caBundle` option, as we can see in the stanza above (value ommited for brevity). This authentication must be done to ensure that it is the API server who is contacting the webhook, because this situation will lead to registration entries creation or deletion on the SPIRE Server, something that is a key point in the SPIRE infrastructure, and should be tightly controlled.
+The webhook authenticates the API server, and for this reason we provide a CA bundle with the `caBundle` option, as we can see in the stanza above (value omitted for brevity). This authentication must be done to ensure that it is the API server that is contacting the webhook, because this situation will lead to registration entry creation or deletion on the SPIRE Server, something that is a key point in the SPIRE infrastructure and should be strongly secured.

-Also, a secret is volume mounted in the `/run/spire/k8s-workload-registrar/secret` directory inside the SPIRE Server container, containing the K8S Workload Registrar server key. We can see this in the `volumeMounts` section of the SPIRE Server statefulset configuration file:
+Also, a secret is volume mounted in the `/run/spire/k8s-workload-registrar/secret` directory inside the SPIRE Server container. This secret contains the K8s Workload Registrar server key. We can see this in the `volumeMounts` section of the SPIRE Server statefulset configuration file:

```
- name: k8s-workload-registrar-secret
@@ -90,9 +94,9 @@ data:
  server-key.pem: ...
```

-Again, the value of the key is ommited.
+Again, the value of the key is omitted.

-Another configuration that is relevant in this mode is the registrar certificates `ConfigMap`, that contains the K8S Workload Registrar server certificate and CA bundle used to verify the client certificate presented by the API server. This is mounted in the `/run/spire/k8s-workload-registrar/certs` directory.
We can also check this by seeing the `volumeMounts` section of the SPIRE Server statefulset configuration file, which is shown below:
+Another setting that is relevant in this mode is the registrar certificates `ConfigMap`, which contains the K8s Workload Registrar server certificate and CA bundle used to verify the client certificate presented by the API server. This is mounted in the `/run/spire/k8s-workload-registrar/certs` directory. This is defined in the `volumeMounts` section of the SPIRE Server statefulset configuration file, which is shown below:

```
- name: k8s-workload-registrar-certs
@@ -100,7 +104,7 @@ Another configuration that is relevant in this mode is the registrar certificate
  readOnly: true
```

-This certificates are stored in a `ConfigMap`:
+These certificates are stored in a `ConfigMap`:

```
apiVersion: v1
@@ -136,16 +140,16 @@ data:
  k8s-workload-registrar.conf: |
    trust_domain = "example.org"
    server_socket_path = "/tmp/spire-server/private/api.sock"
-    cluster = "example-cluster"
+    cluster = "demo-cluster"
    mode = "webhook"
    cert_path = "/run/spire/k8s-workload-registrar/certs/server-cert.pem"
    key_path = "/run/spire/k8s-workload-registrar/secret/server-key.pem"
    cacert_path = "/run/spire/k8s-workload-registrar/certs/cacert.pem"
```

-As we can see, the `key_path` points to where the secret containing the server key is mounted, which was shown earlier. The `cert_path` and `cacert_path` points to the directory where the `ConfigMap` with the PEM encoded certificates for the server and for the CA are mounted. When the webhook is triggered, the registrar acts as the server and validates the identity of the client, which is the Kubernetes API server in this case. We can disable this authentication by setting the ```insecure_skip_client_verification``` option to `true` (though it is not recommended).
+As we can see, the `key_path` points to where the secret containing the server key is mounted, which was shown earlier. The `cert_path` and `cacert_path` entries point to the directory where the `ConfigMap` with the PEM-encoded certificates for the server and for the CA are mounted. When the webhook is triggered, the registrar acts as the server and validates the identity of the client, which is the Kubernetes API server in this case. We can disable this authentication by setting the ```insecure_skip_client_verification``` option to `true` (though it is not recommended).

-For the authentication, a `KubeConfig` file with the client certificate and key the API server should use to authenticate with the registrar is mounted inside the filesystem of the Kubernetes node. This file is shown below:
+For authentication, a `KubeConfig` file with the client certificate and the key the API server should use to authenticate with the registrar is mounted inside the filesystem of the Kubernetes node. This file is shown below:

```
apiVersion: v1
@@ -167,67 +171,39 @@ plugins:
  configuration:
    apiVersion: apiserver.config.k8s.io/v1alpha1
    kind: WebhookAdmission
-    kubeConfigFile: /etc/kubernetes/pki/admctrl/kubeconfig.yaml
+    kubeConfigFile: /var/lib/minikube/certs/admctrl/kubeconfig.yaml
```

-To mount the two files into the node, we will use the special option `extraMounts` of kind, that allows us to pass files between the host and a kind node.
-
-```
-nodes:
-- role: control-plane
-  extraMounts:
-  - containerPath: /etc/kubernetes/pki/admctrl
-    hostPath: WEBHOOKDIR/k8s/admctrl
-```
+To mount the two files into the node, since we are using the docker driver to start minikube, we will use the `docker cp` command. Once the files are placed in the node's filesystem, we use the `apiserver.admission-control-config-file` extra flag to specify the location of the admission control configuration file, which will be placed at `/var/lib/minikube/certs/admctrl/admission-control.yaml`.

-The ```WEBHOOKDIR/k8s/admctrl``` variable points to the folder host path where the files are stored, and `containerPath` specify the directory in which the files will be mounted.
+## Run the registrar in webhook mode

-In kind's specific configuration file (`kind-config.yaml`), we use the `admission-control-config-file` option to tell the API server where to find the admission configuration. Note that this value match with the one set in `ContainerPath`.
-
-```
-kind: Cluster
-apiVersion: kind.x-k8s.io/v1alpha4
-kubeadmConfigPatches:
-- |
-  apiVersion: kubeadm.k8s.io/v1beta2
-  kind: ClusterConfiguration
-  metadata:
-    name: config
-  apiServer:
-    extraArgs:
-      "service-account-signing-key-file": "/etc/kubernetes/pki/sa.key"
-      "service-account-issuer": "api"
-      "service-account-api-audiences": "api,spire-server"
-      "admission-control-config-file": "/etc/kubernetes/pki/admctrl/admission-control.yaml"
-...
-```
-
-We have looked at the key points of the webhook mode's configuration, so let's apply the necessary files to set our scenario with a SPIRE Server with the registrar container in it, an Agent, and a workload, by issuing the following command in the `mode-webhook` directory:
+We have looked at the key points of the webhook mode's configuration, so let's apply the necessary files to enable our scenario with a SPIRE Server with the registrar container in it, an Agent, and a workload by issuing the following command in the `mode-webhook` directory:

```console
$ bash scripts/deploy-scenario.sh
```

-This will create a new kubernetes cluster for us, and apply the necessary files for the scenario to work. This is all we need to have the registration entries created on the server. We will run the server command to see the registration entries created, by executing the command below:
+This is all we need to have the registration entries created on the server. We can list the created registration entries by executing the command below:

```console
-$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock
+$ kubectl exec statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock
```

-You should see the following 3 registration entries, corresponding to the node, the agent, and the workload (the order of the results may differ with your output).
+You should see the following three registration entries, corresponding to the node, the agent, and the workload (the order of the results may differ in your output).

```console
Found 3 entries
Entry ID : ...
-SPIFFE ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node
+SPIFFE ID : spiffe://example.org/k8s-workload-registrar/demo-cluster/node
Parent ID : spiffe://example.org/spire/server
Revision : 0
TTL : default
-Selector : k8s_psat:cluster:example-cluster
+Selector : k8s_psat:cluster:demo-cluster

Entry ID : ...
SPIFFE ID : spiffe://example.org/ns/spire/sa/spire-agent
-Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node
+Parent ID : spiffe://example.org/k8s-workload-registrar/demo-cluster/node
Revision : 0
TTL : default
Selector : k8s:ns:spire
@@ -235,32 +211,32 @@ Selector : k8s:pod-name:spire-agent-wtx7b

Entry ID : ...
SPIFFE ID : spiffe://example.org/ns/spire/sa/default
-Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node
+Parent ID : spiffe://example.org/k8s-workload-registrar/demo-cluster/node
Revision : 0
TTL : default
Selector : k8s:ns:spire
Selector : k8s:pod-name:example-workload-6877cd47d5-2fmpq
```

-We omitted the entry ids, as it may change with every run. Let's see how the other fields are built:
+We omitted the entry IDs, as those may change with every run. Let's see how the other fields are built:

-The cluster name *example-cluster* is used as Parent ID for the entries that correspond to the agent and the workload, but there is no reference to the node that this pods belong to, this is, the registration entries are mapped to a single node entry inside the cluster. This represents a drawback for this mode, as all the nodes in the cluster have permission to get identities for all the workloads that belong to the Kubernetes cluster, which increases the blast radius in case of a node being compromised, among other disadvantages.
+The cluster name *demo-cluster* is used in the Parent ID field for the entries that correspond to the agent and the workload (second and third, respectively), but there is no reference to the node that these pods belong to, that is, the registration entries are mapped to a single node entry inside the cluster. This represents a drawback for this mode, as all the nodes in the cluster have permission to get identities for all the workloads that belong to the Kubernetes cluster, which increases the blast radius in case of a node being compromised, among other disadvantages.

-Taking a look on the assigned SPIFFE IDs for the agent and the workload, we can see that they have the following form:
+Taking a look at the assigned SPIFFE IDs for the agent and the workload, we can see that they have the following form:
*spiffe://\<trust-domain\>/ns/\<namespace\>/sa/\<service-account\>*. From this, we can conclude that we are using the registrar configured with the Service Account Based workload registration (which is the default behaviour). For instance, as the workload uses the *default* service account, in the *spire* namespace, its SPIFFE ID is: *spiffe://example.org/ns/spire/sa/default*
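At this point you can also confirm, from inside the workload container, that an SVID is actually served for that ID. The command below is the same check that this repository's test scripts perform (it assumes the agent socket path used throughout this tutorial):

```console
$ kubectl exec -t deployment/example-workload -n spire -- \
    /opt/spire/bin/spire-agent api fetch -socketPath /tmp/spire-agent/public/api.sock
```

The output should include the *spiffe://example.org/ns/spire/sa/default* SPIFFE ID shown above.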
-Another thing that is worth to examine is the registrar log, in which we will found out if the entries were created by this container. Run the following command to get the logs of the registrar, and to look for the *Created pod entry* keyword.
+Another thing worth examining is the registrar log to find out if the entries were created by this container. Run the following command to display lines in the log that match *Created pod entry*.

```console
$ kubectl logs statefulset/spire-server -n spire -c k8s-workload-registrar | grep "Created pod entry"
```

-The output of this command includes 3 lines, one for every entry created, and we can conclude that the 3 entries that were present on the SPIRE Server were created by the registrar. They correspond to the node, agent, and workload, in that specific order.
+The output of this command includes three lines, one for every entry created.
We can conclude that the three entries that were present on the SPIRE Server were created by the registrar. They correspond to the node, agent, and workload, in that specific order. ## Pod deletion -Let's see how the registrar handles a pod deletion, and which impact does it have on the registration entries. Run the following command to delete the workload deployment: +Let's see how the registrar handles a pod deletion, and the impact it has on the registration entries. Run the following command to delete the workload deployment: ```console $ kubectl delete deployment/example-workload -n spire @@ -269,50 +245,50 @@ $ kubectl delete deployment/example-workload -n spire Again, check for the registration entries with the command below: ```console -$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock +$ kubectl exec statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock ``` -The output of the command will not include the registration entry that correspond to the workload, because the pod was deleted, and should be similar to: +The output of the command will not include the registration entry that corresponds to the workload, because the pod was deleted, and should be similar to: ```console Found 2 entries Entry ID : ... -SPIFFE ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node +SPIFFE ID : spiffe://example.org/k8s-workload-registrar/demo-cluster/node Parent ID : spiffe://example.org/spire/server Revision : 0 TTL : default -Selector : k8s_psat:cluster:example-cluster +Selector : k8s_psat:cluster:demo-cluster Entry ID : ... SPIFFE ID : spiffe://example.org/ns/spire/sa/spire-agent -Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node +Parent ID : spiffe://example.org/k8s-workload-registrar/demo-cluster/node Revision : 0 TTL : default Selector : k8s:ns:spire Selector : k8s:pod-name:spire-agent-wtx7b ``` -We will check the registrar logs to find out if the registrar deleted the entry, looking for the "Deleted pod entry" keyword, with the command shown below: +We will check the registrar logs to find out if it deleted the entry, looking for the "Deleting pod entries" keyword, with the command shown below: ```console $ kubectl logs statefulset/spire-server -n spire -c k8s-workload-registrar | grep "Deleting pod entries" ``` -The registrar successfuly deleted the corresponding entry for the *example-workload* pod. +The registrar successfully deleted the corresponding entry for the *example-workload* pod. ## Teardown -To delete the resources used for this mode, we will delete the cluster by executing: +To delete the resources used for this mode, we'll issue the `delete-scenario.sh` script: ```console -kind delete cluster --name example-cluster +$ bash scripts/delete-scenario.sh ``` -# Reconcile mode +# Configure reconcile mode -This mode, as opposed to Webhook mode, does not use a validating webhook but two reconciling controllers instead: one for the nodes and one for the pods. For this reason, we will not deploy all the configuration needed to perform the Kubernetes API server authentication, as the secret and `KubeConfig` entry, for instance, situation that makes the configuration simpler. +This mode, as opposed to webhook mode, does not use a validating webhook but two reconciling controllers instead: one for the nodes and one for the pods. 
For this reason, it isn't necessary to configure Kubernetes API server authentication with secrets and the `KubeConfig` entry, making the configuration much simpler.

-We will jump directly into the registrar's container configuration, which is shown below:
+The registrar's container configuration is:

```
apiVersion: v1
@@ -324,7 +300,7 @@ data:
  k8s-workload-registrar.conf: |
    trust_domain = "example.org"
    server_socket_path = "/tmp/spire-server/private/api.sock"
-    cluster = "example-cluster"
+    cluster = "demo-cluster"
    mode = "reconcile"
    pod_label = "spire-workload"
    metrics_addr = "0"
@@ -332,16 +308,18 @@ data:

We are explicitly indicating that *reconcile* mode is used. For the sake of the tutorial, we will be using Label Based workload registration for this mode (as we can see from the `pod_label` configurable), though every workload registration mode can be used with every registrar mode. This is all the configuration that is needed to have the containers working properly.

-We will deploy the same scenario as the previous mode, with the difference on the agent and workload pods: they will be labeled with the *spire-workload* label, that corresponds to the value indicated in the `pod_label` option of the `ConfigMap` shown above. Ensure that your working directory is `mode-reconcile` and run the following command to set the scenario:
+## Run the registrar in reconcile mode
+
+We will deploy the same scenario as the previous mode, with the difference in the agent and workload pods: they will be labeled with the *spire-workload* label that corresponds to the value indicated in the `pod_label` option of the `ConfigMap` shown above. Ensure that your working directory is `mode-reconcile` and run the following command to start the scenario:

```console
$ bash scripts/deploy-scenario.sh
```

-With the Reconcile scenario set, we will check the registration entries and some special considerations for this mode. Let's issue the command below to start a shell into the SPIRE Server container, and to show the existing registration entries.
+With the reconcile scenario running, we will check the registration entries and some special considerations for this mode. Let's issue the command below to show the existing registration entries.

```console
-$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock
+$ kubectl exec statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock
```

Your output should be similar to the following, and shows the entries for the node, the agent and the workload:
@@ -349,16 +327,16 @@ Your output should similar to the following, and shows the entries for the node,

```console
Found 3 entries
Entry ID : ...
-SPIFFE ID : spiffe://example.org/spire-k8s-registrar/example-cluster/node/example-cluster-control-plane
+SPIFFE ID : spiffe://example.org/spire-k8s-registrar/demo-cluster/node/minikube
Parent ID : spiffe://example.org/spire/server
Revision : 0
TTL : default
-Selector : k8s_psat:agent_node_name:example-cluster-control-plane
-Selector : k8s_psat:cluster:example-cluster
+Selector : k8s_psat:agent_node_name:minikube
+Selector : k8s_psat:cluster:demo-cluster

Entry ID : ...
SPIFFE ID : spiffe://example.org/agent
-Parent ID : spiffe://example.org/spire-k8s-registrar/example-cluster/node/example-cluster-control-plane
+Parent ID : spiffe://example.org/spire-k8s-registrar/demo-cluster/node/minikube
Revision : 0
TTL : default
Selector : k8s:ns:spire
@@ -366,28 +344,28 @@ Selector : k8s:pod-name:spire-agent-c5c5f

Entry ID : ...
SPIFFE ID : spiffe://example.org/example-workload
-Parent ID : spiffe://example.org/spire-k8s-registrar/example-cluster/node/example-cluster-control-plane
+Parent ID : spiffe://example.org/spire-k8s-registrar/demo-cluster/node/minikube
Revision : 0
TTL : default
Selector : k8s:ns:spire
Selector : k8s:pod-name:example-workload-b98cc787d-kzxz6
```

-If we compare this entries to those created using Webhook mode, the difference is that the Parent ID of the SVID contains a reference to the node name where the pod is scheduled on, in this case, `example-cluster-control-plane`. We mentioned that this doesn't happen using the Webhook node, and this was one of its principal drawbacks. Also, for the node registration entry (the one that has the SPIRE Server SPIFFE ID as the Parent ID), the node name is used in the selectors, along with the cluster name. For the remaining two entries, pod name and namespace are used in the selectors instead.
+If we compare these entries to those created using webhook mode, the difference is that the Parent ID of the agent and workload registration entries (second and third, respectively) contains a reference to the node where the pods are scheduled, in this case, using its name `minikube`. We mentioned that this doesn't happen using the webhook mode, and this was one of the principal drawbacks of that mode. Also, the pod name and namespace are used in the selectors. For the node registration entry (the one that has the SPIRE Server SPIFFE ID as the Parent ID), the node name is used in the selectors, along with the cluster name.

-As we are using Label workload registration mode, the SPIFFE ID's for the agent and the workload (which are labeled as we mentioned before) have the form: *spiffe://\<trust-domain\>/\<label-value\>*. For example, as the agent has the label value equal to `agent`, it has the following SPIFFE ID: *spiffe://example.org/agent*.
+As we are using Label workload registration mode, the SPIFFE IDs for the agent and the workload (which are labeled as we mentioned before) have the form: *spiffe://\<trust-domain\>/\<label-value\>*. For example, as the agent has the label value equal to `agent`, it has the following SPIFFE ID: *spiffe://example.org/agent*.

-Let's check if the registrar indeed created the registration entries, by checking its logs, and looking for the *Created new spire entry* keyword. Run the command that is shown below:
+Let's check if the registrar indeed created the registration entries by checking its logs, and looking for the *Created new spire entry* keyword. Run the command that is shown below:

```console
-$ kubectl logs statefulset/spire-server -n spire -c k8s-workload-registrar | grep "Created new spire entry"
+$ kubectl logs statefulset/spire-server -n spire -c k8s-workload-registrar | grep "controllers.*Created new spire entry"
```

We mentioned before that there were two reconciling controllers, and from the output of the command above, we can see that the node controller created the entry for the single node in the cluster, and that the pod controller created the entries for the two labeled pods: agent and workload.
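As a reference, the sketch below shows roughly how a pod opts in to Label Based registration. It is a hypothetical fragment of `k8s/workload.yaml`, inferred from the `pod_label` option and the entries above, not a copy of the repository file:

```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-workload
  namespace: spire
spec:
  template:
    metadata:
      labels:
        # The label key matches pod_label in the registrar ConfigMap; the value
        # becomes the SPIFFE ID path (spiffe://example.org/example-workload).
        spire-workload: example-workload
```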
## Pod deletion -The Kubernetes Workload Registrar automatically handles the creation and deletion of registration entries. We already see how the entries are created, and now we will test its deletion. Let's delete the workload deployment: +The Kubernetes Workload Registrar automatically handles the creation and deletion of registration entries. We just saw how the entries are created, and now we will test deletion. Let's delete the workload deployment: ```console $ kubectl delete deployment/example-workload -n spire @@ -396,7 +374,7 @@ $ kubectl delete deployment/example-workload -n spire We will check if its corresponding entry is deleted too. Run the following command to see the registration entries on the SPIRE Server: ```console -$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock +$ kubectl exec statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock ``` The output will only show two registration entries, because the workload entry was deleted by the registrar: @@ -405,32 +383,32 @@ The output will only show two registration entries, because the workload entry w Found 2 entries Entry ID : ... SPIFFE ID : spiffe://example.org/agent -Parent ID : spiffe://example.org/spire-k8s-registrar/example-cluster/node/example-cluster-control-plane +Parent ID : spiffe://example.org/spire-k8s-registrar/demo-cluster/node/minikube Revision : 0 TTL : default Selector : k8s:ns:spire Selector : k8s:pod-name:spire-agent-c5c5f Entry ID : ... -SPIFFE ID : spiffe://example.org/spire-k8s-registrar/example-cluster/node/example-cluster-control-plane +SPIFFE ID : spiffe://example.org/spire-k8s-registrar/demo-cluster/node/minikube Parent ID : spiffe://example.org/spire/server Revision : 0 TTL : default -Selector : k8s_psat:agent_node_name:example-cluster-control-plane -Selector : k8s_psat:cluster:example-cluster +Selector : k8s_psat:agent_node_name:minikube +Selector : k8s_psat:cluster:demo-cluster ``` If we look for the *Deleted entry* keyword on the registrar logs, we will find out that the registrar deleted the entry. Issue the following command: ```console -$ kubectl logs statefulset/spire-server -n spire -c k8s-workload-registrar | grep "Deleted entry" +$ kubectl logs statefulset/spire-server -n spire -c k8s-workload-registrar | grep "controllers.*Deleted entry" ``` -The pod controller successfuly deleted the entry. +The pod controller successfully deleted the entry. ## Non-labeled pods -As we are using Label Based workload registration, only pods that have the *spire-workload* label will have its registration entry automatically created. Let's deploy a pod that has no label with the command below, by executing the comand below, from the `mode-reconcile` directory: +As we are using Label Based workload registration, only pods that have the *spire-workload* label will have their registration entries automatically created. 
Let's deploy a pod that has no label by executing the command below from the `mode-reconcile` directory:

```console
$ kubectl apply -f k8s/not-labeled-workload.yaml
@@ -439,22 +417,22 @@ $ kubectl apply -f k8s/not-labeled-workload.yaml

Let's see the existing registration entries with the command:

```console
-$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock
+$ kubectl exec statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock
```

-The output should remain constant compared to the one that we obtained in the *Pod deletion* section. This implies that the only two registration entries on the SPIRE Server corresponds to the labeled deployed resources. This is the expected behaviour, as only labeled pods will be considered by the workload registrar while using the Label Workload registration mode.
+The output should be the same as the output that we obtained in the *Pod deletion* section. This implies that the registrar only creates entries for pods that are using the matching label.

## Teardown

-To delete the resources used for this mode, we will delete the cluster by executing:
+To delete the resources used for this mode, issue the `delete-scenario.sh` script:

```console
-kind delete cluster --name example-cluster
+$ bash scripts/delete-scenario.sh
```

-# CRD mode
+# Configure CRD mode

-This mode takes advantage of the `CustomResourceDefinition` feature from Kubernetes, which allows SPIRE to integrate with this tool and its control plane. A SPIFFE ID is defined as a custom resource, with an structure that matches the form of a registration entry. Below is a reduced example of the definition of a SPIFFE ID CRD.
+This mode takes advantage of the `CustomResourceDefinition` feature from Kubernetes, which allows SPIRE to integrate with the Kubernetes control plane. A SPIFFE ID is defined as a custom resource, with a structure that matches the form of a registration entry. Below is a simplified example of the definition of a SPIFFE ID CRD.

```
apiVersion: spiffeid.spiffe.io/v1beta1
@@ -470,7 +448,7 @@ spec:
  spiffeId: spiffe://example.org/test
```

-The main goal of the custom resource is to track the intent of what and how the registration entries should look on the SPIRE Server, and to track any modification of these registration entries, reconciling its existence. This means that every SPIFFE ID CRD will have a matching registration entry, whose existence will be closely linked. Every modification done to the registration entry will have an impact on its corresponding SPIFFE ID CRD, and viceversa.
+The main goal of the custom resource is to track the intent of what and how the registration entries should look on the SPIRE Server by keeping these resources in sync with any modification made to the registration entries. This means that every SPIFFE ID CRD will have a matching registration entry, and their lifecycles are closely linked. Every modification done to a registration entry will have an impact on its corresponding SPIFFE ID CRD, and vice versa.

The `ConfigMap` for the registrar below shows that we will be using the *crd* mode, and that Annotation Based workload registration is used along with it. The annotation that the registrar will look for is *spiffe.io/spiffe-id*.
@@ -484,12 +462,14 @@ data: k8s-workload-registrar.conf: | trust_domain = "example.org" server_socket_path = "/tmp/spire-server/private/api.sock" - cluster = "example-cluster" + cluster = "demo-cluster" mode = "crd" pod_annotation = "spiffe.io/spiffe-id" metrics_bind_addr = "0" ``` +## Run the registrar in CRD mode + Let's deploy the necessary files, including the base scenario plus the SPIFFE ID CRD definition, and examine the automatically created registration entries. Ensure that your working directory is `mode-crd`, and run: ```console @@ -499,7 +479,7 @@ $ bash scripts/deploy-scenario.sh Run the entry show command by executing: ```console -$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock +$ kubectl exec statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock ``` The output should show the following registration entries: @@ -507,49 +487,49 @@ The output should show the following registration entries: ```console Found 3 entries Entry ID : ... -SPIFFE ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane +SPIFFE ID : spiffe://example.org/k8s-workload-registrar/demo-cluster/node/minikube Parent ID : spiffe://example.org/spire/server Revision : 1 TTL : default Selector : k8s_psat:agent_node_uid:08990bfd-3551-4761-8a1b-2e652984ffdd -Selector : k8s_psat:cluster:example-cluster +Selector : k8s_psat:cluster:demo-cluster Entry ID : ... SPIFFE ID : spiffe://example.org/testing/agent -Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane +Parent ID : spiffe://example.org/k8s-workload-registrar/demo-cluster/node/minikube Revision : 1 TTL : default -Selector : k8s:node-name:example-cluster-control-plane +Selector : k8s:node-name:minikube Selector : k8s:ns:spire Selector : k8s:pod-uid:538886bb-48e1-4795-b386-10e97f50e34f DNS name : spire-agent-jzc8w Entry ID : ... SPIFFE ID : spiffe://example.org/testing/example-workload -Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane +Parent ID : spiffe://example.org/k8s-workload-registrar/demo-cluster/node/minikube Revision : 1 TTL : default -Selector : k8s:node-name:example-cluster-control-plane +Selector : k8s:node-name:minikube Selector : k8s:ns:spire Selector : k8s:pod-uid:78ed3fc5-4cff-476a-90f5-37d3abd47823 DNS name : example-workload-6877cd47d5-l4hv5 ``` -3 entries were created corresponding to the node, agent, and workload. For the node entry (the one that has the SPIRE Server SPIFFE ID as Parent ID), we see a difference in the selectors, comparing it with the selectors in the node entry created using Reconcile mode for the same pod: we find out that instead of using the node name, CRD mode stores the UID of the node where the agent is running on. As the node name is used in the SPIFFE ID assigned to the node, we can take this as a mapping from node UID to node name. +Three entries were created corresponding to the node, agent, and workload. For the node entry (the one that has the SPIRE Server SPIFFE ID as Parent ID), we see a difference in the selectors compared to reconcile mode: instead of using the node name, CRD mode stores the UID of the node where the agent is running on, and as the node name is used in the SPIFFE ID, we can take this as a mapping from node UID to node name. 
-Something similar happens with the pod entries, but this time the pod UID where the workload is running is stored in the selectors, instead of the node UID.
+Something similar happens with the pod entries, but this time the selectors store the UID of the pod where the workload is running, instead of the node UID.

If we now focus our attention on the SPIFFE IDs assigned to the workloads, we see that they take the form *spiffe://\<trust-domain\>/\<annotation-value\>*. By using Annotation Based workload registration, it is possible to freely set the SPIFFE ID path. In this case, for the workload, we set the annotation value to *testing/example-workload*.

Obtain the registrar logs by issuing:

```console
-kubectl logs statefulset/spire-server -n spire -c k8s-workload-registrar | grep "Created entry"
+$ kubectl logs statefulset/spire-server -n spire -c k8s-workload-registrar | grep "Created entry"
```

-This will show that the registrar created the 3 entries into the SPIRE Server.
+This will show that the registrar created the three entries in the SPIRE Server.

-In addition to the SPIRE entries, the registrar in this mode is configure to create the corresponding custom resources. Let's check for this using a Kubernetes native command such as:
+In addition to the SPIRE entries, the registrar in this mode is configured to create the corresponding custom resources. Let's check for this using a Kubernetes native command such as:

```console
$ kubectl get spiffeids -n spire
@@ -559,7 +539,7 @@ This command will show the custom resources for each one of the pods:

```console
NAME AGE
-example-cluster-control-plane 24m
+minikube 24m
example-workload-5bffcd75d-stl5w 24m
spire-agent-r86rz 24m
```
@@ -575,7 +555,7 @@ $ kubectl delete deployment/example-workload -n spire

And now, check the registration entries in the SPIRE Server by executing:

```console
-$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock
+$ kubectl exec statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock
```

The output should look like:
@@ -583,41 +563,41 @@ The output should look like:

```console
Found 2 entries
Entry ID : ...
-SPIFFE ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane
+SPIFFE ID : spiffe://example.org/k8s-workload-registrar/demo-cluster/node/minikube
Parent ID : spiffe://example.org/spire/server
Revision : 1
TTL : default
Selector : k8s_psat:agent_node_uid:08990bfd-3551-4761-8a1b-2e652984ffdd
-Selector : k8s_psat:cluster:example-cluster
+Selector : k8s_psat:cluster:demo-cluster

Entry ID : ...
SPIFFE ID : spiffe://example.org/testing/agent
-Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane
+Parent ID : spiffe://example.org/k8s-workload-registrar/demo-cluster/node/minikube
Revision : 1
TTL : default
-Selector : k8s:node-name:example-cluster-control-plane
+Selector : k8s:node-name:minikube
Selector : k8s:ns:spire
Selector : k8s:pod-uid:538886bb-48e1-4795-b386-10e97f50e34f
DNS name : spire-agent-jzc8w
```

-The only entries that should exist now are the ones that match the node and the SPIRE agent, because the workload one was deleted by the registrar, something that we can check if we examine the registrar logs, but this time looking for the keyword "Deleted entry".
+The only entries that should exist now are the ones for the node and the SPIRE Agent, because the workload entry was deleted by the registrar. We can confirm this by examining the registrar logs, this time looking for the keyword "Deleted entry":

```console
$ kubectl logs statefulset/spire-server -n spire -c k8s-workload-registrar | grep -A 1 "Deleted entry"
```

-As the registrar handles the custom resources automatically, it also deleted the corresponding SPIFFE ID CRD, something that we can also check by querying the Kubernetes control plane (```kubectl get spiffeids -n spire```), command from which we will obtaing the following:
+As the registrar handles the custom resources automatically, it also deleted the corresponding SPIFFE ID CRD. We can check this by querying the Kubernetes control plane (`kubectl get spiffeids -n spire`), which should display the following:

```console
NAME AGE
-example-cluster-control-plane 41m
+minikube 41m
spire-agent-r86rz 40m
```

## Non-annotated pods

-Let's check if a pod that has no annotations its considered by the registrar. Deploy a new workload with this condition with the following command:
+Let's check whether a pod that has no annotations is detected by the registrar. Deploy a new workload without annotations using the following command:

```console
$ kubectl apply -f k8s/not-annotated-workload.yaml
@@ -626,14 +606,14 @@ $ kubectl apply -f k8s/not-annotated-workload.yaml
As in the previous section, let's see the registration entries that are present in the SPIRE Server:

```console
-$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock
+$ kubectl exec statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock
```

The output of the command should match the one shown in the *Pod deletion* section because, as expected, no new entry has been created.

## SPIFFE ID CRD creation

-One of the benefits of using the CRD Mode is that we can manipulate the SPIFFE IDs as if they were resources inside Kubernetes environment, in other words using the *kubectl* command.
+One of the benefits of using the CRD mode is that we can manipulate the SPIFFE IDs as if they were native resources inside the Kubernetes environment, in other words, using the `kubectl` command.

Let's create a new SPIFFE ID CRD by using:

```console
$ kubectl apply -f k8s/test_spiffeid.yaml
```

-We will check if it was created, consulting the custom resources with ```kubectl get spiffeids -n spire```, whose output will show the following:
+We can check that it was created by consulting the custom resources with `kubectl get spiffeids -n spire`, the output of which should show the following:

```console
NAME AGE
@@ -653,7 +633,7 @@ spire-agent-r86rz 45m
```

The resource was successfully created, but did it have any impact on the SPIRE Server?
Let's execute the command below to see the registration entries:

```console
-$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock
+$ kubectl exec statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock
```

You'll get an output similar to this:
@@ -661,12 +641,12 @@ You'll get an output similar to this:

```console
Found 3 entries
Entry ID : ...
-SPIFFE ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane
+SPIFFE ID : spiffe://example.org/k8s-workload-registrar/demo-cluster/node/minikube
Parent ID : spiffe://example.org/spire/server
Revision : 1
TTL : default
Selector : k8s_psat:agent_node_uid:08990bfd-3551-4761-8a1b-2e652984ffdd
-Selector : k8s_psat:cluster:example-cluster
+Selector : k8s_psat:cluster:demo-cluster

Entry ID : ...
SPIFFE ID : spiffe://example.org/test
@@ -678,20 +658,20 @@ Selector : k8s:pod-name:my-test-pod

Entry ID : ...
SPIFFE ID : spiffe://example.org/testing/agent
-Parent ID : spiffe://example.org/k8s-workload-registrar/example-cluster/node/example-cluster-control-plane
+Parent ID : spiffe://example.org/k8s-workload-registrar/demo-cluster/node/minikube
Revision : 1
TTL : default
-Selector : k8s:node-name:example-cluster-control-plane
+Selector : k8s:node-name:minikube
Selector : k8s:ns:spire
Selector : k8s:pod-uid:538886bb-48e1-4795-b386-10e97f50e34f
DNS name : spire-agent-jzc8w
```

-As we can see, a SPIFFE ID CRD creation triggers a registration entry creation on the SPIRE Server too.
+As we can see, SPIFFE ID CRD creation triggers registration entry creation on the SPIRE Server, too.

## SPIFFE ID CRD deletion

-The lifecycle of a SPIFFE ID CRD can be managed by Kubernetes, and has a direct impact on the corresponding registration entry stored in the SPIRE Server. We already see how a SPIFFE ID CRD creation activates a registration entry one. We will prove that the same applies for a CRD deletion.
+The lifecycle of a SPIFFE ID CRD can be managed by Kubernetes, and has a direct impact on the corresponding registration entry stored in the SPIRE Server. We already saw how creating a SPIFFE ID CRD triggers the creation of a registration entry. We will prove that the same applies to CRD deletion.

Let's delete the previously created SPIFFE ID CRD, and then check the registration entries on the server. Run the following command to delete the CRD:

@@ -702,15 +682,15 @@ $ kubectl delete spiffeid/my-test-spiffeid -n spire

Now, we will check the registration entries:

```console
-$ kubectl exec -it statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock
+$ kubectl exec statefulset/spire-server -n spire -c spire-server -- bin/spire-server entry show -registrationUDSPath /tmp/spire-server/private/api.sock
```

The output from this command should include only the entries for the node and the agent, because the recently created SPIFFE ID CRD was deleted, along with its matching registration entry.
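+For reference, the manifest applied earlier in this section plausibly looks like the sketch below. This is a hypothetical reconstruction based on the entry we saw created; the actual contents of `k8s/test_spiffeid.yaml` are not shown in this tutorial, and the `parentId` value in particular is an assumption.
+
+```
+apiVersion: spiffeid.spiffe.io/v1beta1
+kind: SpiffeID
+metadata:
+  name: my-test-spiffeid
+  namespace: spire
+spec:
+  # Assumed parent ID; the tutorial does not show the real value
+  parentId: spiffe://example.org/spire/server
+  selector:
+    # Matches the k8s:pod-name:my-test-pod selector seen in the entry
+    podName: my-test-pod
+  spiffeId: spiffe://example.org/test
+```
+
+Deleting such a resource with `kubectl delete` is what removed both the custom resource and its registration entry above.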
## Teardown -To delete the resources used for this mode, we will delete the cluster by executing: +To delete the resources used for this mode, we will run the `delete-scenario.sh` script: ```console -$ kind delete cluster --name example-cluster +$ bash scripts/delete-scenario.sh ``` diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/spire-agent.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/spire-agent.yaml index 8f75932..52ba97e 100644 --- a/k8s/k8s-workload-registrar/mode-crd/k8s/spire-agent.yaml +++ b/k8s/k8s-workload-registrar/mode-crd/k8s/spire-agent.yaml @@ -1,4 +1,3 @@ -# ServiceAccount for the SPIRE agent apiVersion: v1 kind: ServiceAccount metadata: @@ -7,7 +6,6 @@ metadata: --- -# Required cluster role to allow spire-agent to query k8s API server kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -19,7 +17,6 @@ rules: --- -# Binds above cluster role to spire-agent service account kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -55,7 +52,7 @@ data: plugins { NodeAttestor "k8s_psat" { plugin_data { - cluster = "example-cluster" + cluster = "demo-cluster" } } @@ -106,7 +103,6 @@ spec: annotations: spiffe.io/spiffe-id: "testing/agent" spec: - # hostPID is required for K8S Workload Attestation. hostPID: true hostNetwork: true dnsPolicy: ClusterFirstWithHostNet diff --git a/k8s/k8s-workload-registrar/mode-crd/k8s/spire-server.yaml b/k8s/k8s-workload-registrar/mode-crd/k8s/spire-server.yaml index e53aef6..d925e7e 100644 --- a/k8s/k8s-workload-registrar/mode-crd/k8s/spire-server.yaml +++ b/k8s/k8s-workload-registrar/mode-crd/k8s/spire-server.yaml @@ -12,8 +12,6 @@ kind: ClusterRole metadata: name: k8s-workload-registrar-role rules: - # allow TokenReview requests (to verify service account tokens for PSAT - # attestation) - apiGroups: ["authentication.k8s.io"] resources: ["tokenreviews"] verbs: ["get", "create"] @@ -47,19 +45,15 @@ subjects: --- -# Role for the SPIRE server kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: namespace: spire name: spire-server-role rules: - # allow "get" access to pods (to resolve selectors for PSAT attestation) - apiGroups: [""] resources: ["pods"] verbs: ["get"] - # allow access to "get" and "patch" the spire-bundle ConfigMap (for SPIRE - # agent bootstrapping, see the spire-bundle ConfigMap below) - apiGroups: [""] resources: ["configmaps"] resourceNames: ["spire-bundle"] @@ -67,8 +61,6 @@ rules: --- -# RoleBinding granting the spire-server-role to the SPIRE server -# service account. kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -93,7 +85,6 @@ metadata: --- -# ConfigMap containing the SPIRE server configuration. apiVersion: v1 kind: ConfigMap metadata: @@ -128,7 +119,7 @@ data: NodeAttestor "k8s_psat" { plugin_data { clusters = { - "example-cluster" = { + "demo-cluster" = { service_account_whitelist = ["spire:spire-agent"] } } @@ -168,7 +159,7 @@ data: k8s-workload-registrar.conf: | trust_domain = "example.org" server_socket_path = "/tmp/spire-server/private/api.sock" - cluster = "example-cluster" + cluster = "demo-cluster" mode = "crd" pod_annotation = "spiffe.io/spiffe-id" metrics_bind_addr = "0" @@ -251,7 +242,6 @@ spec: --- -# Service definition for SPIRE server defining the gRPC port. 
apiVersion: v1 kind: Service metadata: diff --git a/k8s/k8s-workload-registrar/mode-crd/test.sh b/k8s/k8s-workload-registrar/mode-crd/test.sh index 6dbd3be..535c9c6 100755 --- a/k8s/k8s-workload-registrar/mode-crd/test.sh +++ b/k8s/k8s-workload-registrar/mode-crd/test.sh @@ -24,7 +24,7 @@ trap cleanup EXIT cleanup set_env -NODE_SPIFFE_ID="spiffe://example.org/k8s-workload-registrar/example-cluster/node/" +NODE_SPIFFE_ID="spiffe://example.org/k8s-workload-registrar/demo-cluster/node/" AGENT_SPIFFE_ID="spiffe://example.org/testing/agent" WORKLOAD_SPIFFE_ID="spiffe://example.org/testing/example-workload" diff --git a/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-agent.yaml b/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-agent.yaml index 395d117..7a07ac4 100644 --- a/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-agent.yaml +++ b/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-agent.yaml @@ -1,4 +1,3 @@ -# ServiceAccount for the SPIRE agent apiVersion: v1 kind: ServiceAccount metadata: @@ -7,7 +6,6 @@ metadata: --- -# Required cluster role to allow spire-agent to query k8s API server kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -19,7 +17,6 @@ rules: --- -# Binds above cluster role to spire-agent service account kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -36,9 +33,6 @@ roleRef: --- -# ConfigMap for the SPIRE agent featuring: -# 1) PSAT node attestation -# 2) K8S Workload Attestation over the secure kubelet port apiVersion: v1 kind: ConfigMap metadata: @@ -59,7 +53,7 @@ data: plugins { NodeAttestor "k8s_psat" { plugin_data { - cluster = "example-cluster" + cluster = "demo-cluster" } } @@ -108,7 +102,6 @@ spec: app: spire-agent spire-workload: agent spec: - # hostPID is required for K8S Workload Attestation. hostPID: true hostNetwork: true dnsPolicy: ClusterFirstWithHostNet diff --git a/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-server.yaml b/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-server.yaml index c7f3e36..5f25135 100644 --- a/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-server.yaml +++ b/k8s/k8s-workload-registrar/mode-reconcile/k8s/spire-server.yaml @@ -1,4 +1,3 @@ -# ServiceAccount used by the SPIRE server. apiVersion: v1 kind: ServiceAccount metadata: @@ -7,7 +6,6 @@ metadata: --- -# Required cluster role to allow spire-server to query k8s API server kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -16,15 +14,12 @@ rules: - apiGroups: [""] resources: ["pods", "nodes"] verbs: ["get", "list", "watch"] - # allow TokenReview requests (to verify service account tokens for PSAT - # attestation) - apiGroups: ["authentication.k8s.io"] resources: ["tokenreviews"] verbs: ["get", "create"] --- -# Binds above cluster role to spire-server service account kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -41,19 +36,15 @@ roleRef: --- -# Role for the SPIRE server kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: namespace: spire name: spire-server-role rules: - # allow "get" access to pods (to resolve selectors for PSAT attestation) - apiGroups: [""] resources: ["pods"] verbs: ["get"] - # allow access to "get" and "patch" the spire-bundle ConfigMap (for SPIRE - # agent bootstrapping, see the spire-bundle ConfigMap below) - apiGroups: [""] resources: ["configmaps"] resourceNames: ["spire-bundle"] @@ -71,8 +62,6 @@ rules: --- -# RoleBinding granting the spire-server-role to the SPIRE server -# service account. 
kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -89,10 +78,6 @@ roleRef: --- -# ConfigMap containing the latest trust bundle for the trust domain. It is -# updated by SPIRE using the k8sbundle notifier plugin. SPIRE agents mount -# this config map and use the certificate to bootstrap trust with the SPIRE -# server during attestation. apiVersion: v1 kind: ConfigMap metadata: @@ -101,7 +86,6 @@ metadata: --- -# ConfigMap containing the SPIRE server configuration. apiVersion: v1 kind: ConfigMap metadata: @@ -136,7 +120,7 @@ data: NodeAttestor "k8s_psat" { plugin_data { clusters = { - "example-cluster" = { + "demo-cluster" = { service_account_whitelist = ["spire:spire-agent"] } } @@ -176,7 +160,7 @@ data: k8s-workload-registrar.conf: | trust_domain = "example.org" server_socket_path = "/tmp/spire-server/private/api.sock" - cluster = "example-cluster" + cluster = "demo-cluster" mode = "reconcile" pod_label = "spire-workload" metrics_addr = "0" @@ -184,9 +168,6 @@ data: --- -# This is the Deployment for the SPIRE server. It waits for SPIRE database to -# initialize and uses the SPIRE healthcheck command for liveness/readiness -# probes. apiVersion: apps/v1 kind: StatefulSet metadata: @@ -262,7 +243,6 @@ spec: --- -# Service definition for SPIRE server defining the gRPC port. apiVersion: v1 kind: Service metadata: diff --git a/k8s/k8s-workload-registrar/mode-reconcile/test.sh b/k8s/k8s-workload-registrar/mode-reconcile/test.sh index 4d37950..3706d18 100755 --- a/k8s/k8s-workload-registrar/mode-reconcile/test.sh +++ b/k8s/k8s-workload-registrar/mode-reconcile/test.sh @@ -22,7 +22,7 @@ trap cleanup EXIT cleanup set_env -NODE_SPIFFE_ID="spiffe://example.org/k8s-workload-registrar/example-cluster/node/" +NODE_SPIFFE_ID="spiffe://example.org/k8s-workload-registrar/demo-cluster/node/" AGENT_SPIFFE_ID="spiffe://example.org/agent" WORKLOAD_SPIFFE_ID="spiffe://example.org/example-workload" diff --git a/k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/kubeconfig.yaml b/k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/kubeconfig.yaml index 72942c5..f52c150 100644 --- a/k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/kubeconfig.yaml +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/admctrl/kubeconfig.yaml @@ -1,5 +1,3 @@ -# KubeConfig with client credentials for the API Server to use to call the -# K8S Workload Registrar service apiVersion: v1 kind: Config users: diff --git a/k8s/k8s-workload-registrar/mode-webhook/k8s/k8s-workload-registrar-secret.yaml b/k8s/k8s-workload-registrar/mode-webhook/k8s/k8s-workload-registrar-secret.yaml index 04e2e89..8e79c6f 100644 --- a/k8s/k8s-workload-registrar/mode-webhook/k8s/k8s-workload-registrar-secret.yaml +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/k8s-workload-registrar-secret.yaml @@ -1,4 +1,3 @@ -# Kubernetes Secret containing the K8S Workload Registrar server key apiVersion: v1 kind: Secret metadata: diff --git a/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-agent.yaml b/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-agent.yaml index fb10424..52ba97e 100644 --- a/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-agent.yaml +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-agent.yaml @@ -1,4 +1,3 @@ -# ServiceAccount for the SPIRE agent apiVersion: v1 kind: ServiceAccount metadata: @@ -7,7 +6,6 @@ metadata: --- -# Required cluster role to allow spire-agent to query k8s API server kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -19,7 +17,6 @@ rules: --- -# Binds above cluster role to 
spire-agent service account kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -35,9 +32,6 @@ roleRef: --- -# ConfigMap for the SPIRE agent featuring: -# 1) PSAT node attestation -# 2) K8S Workload Attestation over the secure kubelet port apiVersion: v1 kind: ConfigMap metadata: @@ -58,7 +52,7 @@ data: plugins { NodeAttestor "k8s_psat" { plugin_data { - cluster = "example-cluster" + cluster = "demo-cluster" } } @@ -109,7 +103,6 @@ spec: annotations: spiffe.io/spiffe-id: "testing/agent" spec: - # hostPID is required for K8S Workload Attestation. hostPID: true hostNetwork: true dnsPolicy: ClusterFirstWithHostNet diff --git a/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-server.yaml b/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-server.yaml index f889385..8639742 100644 --- a/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-server.yaml +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/spire-server.yaml @@ -1,4 +1,3 @@ -# ServiceAccount used by the SPIRE server. apiVersion: v1 kind: ServiceAccount metadata: @@ -7,7 +6,6 @@ metadata: --- -# Required cluster role to allow spire-server to query k8s API server kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -24,7 +22,6 @@ rules: --- -# Binds above cluster role to spire-server service account kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -41,19 +38,15 @@ roleRef: --- -# Role for the SPIRE server kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: namespace: spire name: spire-server-role rules: - # allow "get" access to pods (to resolve selectors for PSAT attestation) - apiGroups: [""] resources: ["pods"] verbs: ["get"] - # allow access to "get" and "patch" the spire-bundle ConfigMap (for SPIRE - # agent bootstrapping, see the spire-bundle ConfigMap below) - apiGroups: [""] resources: ["configmaps"] resourceNames: ["spire-bundle"] @@ -61,8 +54,6 @@ rules: --- -# RoleBinding granting the spire-server-role to the SPIRE server -# service account. kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -79,10 +70,6 @@ roleRef: --- -# ConfigMap containing the latest trust bundle for the trust domain. It is -# updated by SPIRE using the k8sbundle notifier plugin. SPIRE agents mount -# this config map and use the certificate to bootstrap trust with the SPIRE -# server during attestation. apiVersion: v1 kind: ConfigMap metadata: @@ -91,7 +78,6 @@ metadata: --- -# ConfigMap containing the SPIRE server configuration. apiVersion: v1 kind: ConfigMap metadata: @@ -126,7 +112,7 @@ data: NodeAttestor "k8s_psat" { plugin_data { clusters = { - "example-cluster" = { + "demo-cluster" = { service_account_whitelist = ["spire:spire-agent"] } } @@ -166,7 +152,7 @@ data: k8s-workload-registrar.conf: | trust_domain = "example.org" server_socket_path = "/tmp/spire-server/private/api.sock" - cluster = "example-cluster" + cluster = "demo-cluster" mode = "webhook" cert_path = "/run/spire/k8s-workload-registrar/certs/server-cert.pem" key_path = "/run/spire/k8s-workload-registrar/secret/server-key.pem" @@ -174,9 +160,6 @@ data: --- -# ConfigMap containing the K8S Workload Registrar server certificate and -# CA bundle used to verify the client certificate presented by the API server. -# apiVersion: v1 kind: ConfigMap metadata: @@ -213,9 +196,6 @@ data: --- -# This is the Deployment for the SPIRE server. It waits for SPIRE database to -# initialize and uses the SPIRE healthcheck command for liveness/readiness -# probes. 
apiVersion: apps/v1 kind: StatefulSet metadata: @@ -303,7 +283,6 @@ spec: --- -# Service definition for SPIRE server defining the gRPC port. apiVersion: v1 kind: Service metadata: @@ -321,7 +300,6 @@ spec: --- -# Service definition for the admission webhook apiVersion: v1 kind: Service metadata: diff --git a/k8s/k8s-workload-registrar/mode-webhook/k8s/validation-webhook.yaml b/k8s/k8s-workload-registrar/mode-webhook/k8s/validation-webhook.yaml index 0c20483..5509aba 100644 --- a/k8s/k8s-workload-registrar/mode-webhook/k8s/validation-webhook.yaml +++ b/k8s/k8s-workload-registrar/mode-webhook/k8s/validation-webhook.yaml @@ -1,5 +1,3 @@ -# Validating Webhook Configuration for the K8S Workload Registrar -# apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: diff --git a/k8s/k8s-workload-registrar/mode-webhook/test.sh b/k8s/k8s-workload-registrar/mode-webhook/test.sh index 6efb4fb..7751c6d 100755 --- a/k8s/k8s-workload-registrar/mode-webhook/test.sh +++ b/k8s/k8s-workload-registrar/mode-webhook/test.sh @@ -22,7 +22,7 @@ trap cleanup EXIT cleanup set_env -NODE_SPIFFE_ID="spiffe://example.org/k8s-workload-registrar/example-cluster/node" +NODE_SPIFFE_ID="spiffe://example.org/k8s-workload-registrar/demo-cluster/node" AGENT_SPIFFE_ID="spiffe://example.org/ns/spire/sa/spire-agent" WORKLOAD_SPIFFE_ID="spiffe://example.org/ns/spire/sa/default"